gpu: ion: Add support for cached and uncached mappings

Add explicit support for cached and uncached mappings. The kernel,
DMA, and userspace mapping functions now take a flags argument that
indicates whether the mapping will be cached or uncached.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>

Author:       Laura Abbott
Date:         2011-08-19 13:33:56 -07:00
Committed by: Bryan Huntsman
Parent:       d618379341
Commit:       894fd58ea8

5 changed files with 122 additions and 24 deletions
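
For context, a minimal kernel-side sketch of the mapping API after this change. Only ion_map_kernel(), ion_unmap_kernel(), ion_map_dma(), ion_unmap_dma(), ION_SET_CACHE(), CACHED and UNCACHED come from this patch; the example function, its caller-provided client/handle and the error handling are hypothetical, and the header is assumed to be available as <linux/ion.h>.

    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/ion.h>

    /*
     * Hypothetical kernel client: map a buffer cached, then fetch its
     * sglist with the same flags. After this patch a buffer that is
     * already mapped with one set of cache flags cannot be mapped with
     * a different set (the core returns -EEXIST), so both calls pass
     * ION_SET_CACHE(CACHED).
     */
    static int example_map_cached(struct ion_client *client,
                                  struct ion_handle *handle)
    {
            void *vaddr;
            struct scatterlist *sglist;

            vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(CACHED));
            if (IS_ERR_OR_NULL(vaddr))
                    return -EINVAL;

            sglist = ion_map_dma(client, handle, ION_SET_CACHE(CACHED));
            if (IS_ERR_OR_NULL(sglist)) {
                    ion_unmap_kernel(client, handle);
                    return -EINVAL;
            }

            /* ... use vaddr and sglist ... */

            ion_unmap_dma(client, handle);
            ion_unmap_kernel(client, handle);
            return 0;
    }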

View File

@@ -407,7 +407,8 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	return ret;
 }
 
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	void *vaddr;
@@ -431,21 +432,38 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
 		return ERR_PTR(-ENODEV);
 	}
 
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			vaddr = ERR_PTR(-EEXIST);
+			goto out;
+		}
+	} else {
+		buffer->flags = flags;
+	}
+
 	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
-		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
+							flags);
 		if (IS_ERR_OR_NULL(vaddr))
 			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
 		buffer->vaddr = vaddr;
 	} else {
 		vaddr = buffer->vaddr;
 	}
+
+out:
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 	return vaddr;
 }
 
 struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle)
+				struct ion_handle *handle,
+				unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	struct scatterlist *sglist;
@@ -467,6 +485,20 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
 		mutex_unlock(&client->lock);
 		return ERR_PTR(-ENODEV);
 	}
 
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			sglist = ERR_PTR(-EEXIST);
+			goto out;
+		}
+	} else {
+		buffer->flags = flags;
+	}
+
 	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
 		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
 		if (IS_ERR_OR_NULL(sglist))
@@ -475,6 +507,8 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
 	} else {
 		sglist = buffer->sglist;
 	}
+
+out:
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 	return sglist;
@@ -774,6 +808,9 @@ static int ion_share_release(struct inode *inode, struct file* file)
 	struct ion_buffer *buffer = file->private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&buffer->lock);
+	buffer->umap_cnt--;
+	mutex_unlock(&buffer->lock);
 	/* drop the reference to the buffer -- this prevents the
 	   buffer from going away because the client holding it exited
 	   while it was being passed */
@@ -840,6 +877,10 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 	struct ion_client *client;
 	struct ion_handle *handle;
 	int ret;
+	unsigned long flags = file->f_flags & O_DSYNC ?
+				ION_SET_CACHE(UNCACHED) :
+				ION_SET_CACHE(CACHED);
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
 	/* make sure the client still exists, it's possible for the client to
@@ -875,13 +916,28 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	mutex_lock(&buffer->lock);
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			ret = -EEXIST;
+			mutex_unlock(&buffer->lock);
+			goto err1;
+		}
+	} else {
+		buffer->flags = flags;
+	}
+
 	/* now map it to userspace */
-	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
+						flags);
+	buffer->umap_cnt++;
 	mutex_unlock(&buffer->lock);
+
 	if (ret) {
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
-		goto err1;
+		goto err2;
 	}
 
 	vma->vm_ops = &ion_vm_ops;
@@ -895,8 +951,10 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 		 atomic_read(&buffer->ref.refcount));
 	return 0;
 
-err1:
+err2:
+	buffer->umap_cnt--;
 	/* drop the reference to the handle */
+err1:
 	ion_handle_put(handle);
 err:
 	/* drop the reference to the client */

View File

@@ -97,10 +97,13 @@ void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
-				   struct ion_buffer *buffer)
+				   struct ion_buffer *buffer,
+				   unsigned long flags)
 {
-	return __arch_ioremap(buffer->priv_phys, buffer->size,
-			      MT_MEMORY_NONCACHED);
+	if (flags & ION_SET_CACHE(CACHED))
+		return ioremap_cached(buffer->priv_phys, buffer->size);
+	else
+		return ioremap(buffer->priv_phys, buffer->size);
 }
 
 void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
@@ -112,8 +115,14 @@ void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
 }
 
 int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			       struct vm_area_struct *vma)
+			       struct vm_area_struct *vma, unsigned long flags)
 {
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_pfn_range(vma, vma->vm_start,
+			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+			       buffer->size,
+			       vma->vm_page_prot);
+	else
 		return remap_pfn_range(vma, vma->vm_start,
 			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
 			       buffer->size,

View File

@@ -71,6 +71,7 @@ struct ion_buffer {
 	void *vaddr;
 	int dmap_cnt;
 	struct scatterlist *sglist;
+	int umap_cnt;
 };
 
 /**
@@ -95,10 +96,11 @@ struct ion_heap_ops {
 	struct scatterlist *(*map_dma) (struct ion_heap *heap,
 					struct ion_buffer *buffer);
 	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
-	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
+				unsigned long flags);
 	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
-			 struct vm_area_struct *vma);
+			 struct vm_area_struct *vma, unsigned long flags);
 };
 
 /**

View File

@@ -75,9 +75,15 @@ void ion_system_heap_unmap_dma(struct ion_heap *heap,
 }
 
 void *ion_system_heap_map_kernel(struct ion_heap *heap,
-				 struct ion_buffer *buffer)
+				 struct ion_buffer *buffer,
+				 unsigned long flags)
 {
+	if (flags & ION_SET_CACHE(CACHED))
 		return buffer->priv_virt;
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
 }
 
 void ion_system_heap_unmap_kernel(struct ion_heap *heap,
@@ -86,9 +92,15 @@ void ion_system_heap_unmap_kernel(struct ion_heap *heap,
 }
 
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			     struct vm_area_struct *vma)
+			     struct vm_area_struct *vma, unsigned long flags)
 {
-	return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_vmalloc_range(vma, buffer->priv_virt,
+						vma->vm_pgoff);
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static struct ion_heap_ops vmalloc_ops = {
@@ -159,13 +171,19 @@ struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
 int ion_system_contig_heap_map_user(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
-				    struct vm_area_struct *vma)
+				    struct vm_area_struct *vma,
+				    unsigned long flags)
 {
 	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+	if (flags & ION_SET_CACHE(CACHED))
 		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot);
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static struct ion_heap_ops kmalloc_ops = {

View File

@@ -62,6 +62,13 @@ enum ion_heap_ids {
 #define ION_VMALLOC_HEAP_NAME	"vmalloc"
 #define ION_EBI1_HEAP_NAME	"EBI1"
 
+#define CACHED		1
+#define UNCACHED	0
+
+#define ION_CACHE_SHIFT	0
+
+#define ION_SET_CACHE(__cache)	((__cache) << ION_CACHE_SHIFT)
+
 #ifdef __KERNEL__
 struct ion_device;
 struct ion_heap;
@@ -183,11 +190,14 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client
  * @handle:	handle to map
+ * @flags:	flags for this mapping
  *
  * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address.
+ * can be used to access this address. If no flags are specified, this
+ * will return a non-secure uncached mapping.
  */
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags);
 
 /**
  * ion_unmap_kernel() - destroy a kernel mapping for a handle
@@ -204,7 +214,8 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
  * Return an sglist describing the given handle
  */
 struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle);
+				struct ion_handle *handle,
+				unsigned long flags);
 
 /**
 * ion_unmap_dma() - destroy a dma mapping for a handle
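
A note on the flag encoding added to the header above: with ION_CACHE_SHIFT at bit 0, ION_SET_CACHE(CACHED) evaluates to 0x1 and ION_SET_CACHE(UNCACHED) to 0x0, which is why the heaps can distinguish the two cases with a plain bitwise test. A small standalone sketch follows; the macro definitions are copied from the patch, while the helper function name is made up for illustration only.

    #define UNCACHED	0
    #define CACHED	1

    #define ION_CACHE_SHIFT	0
    #define ION_SET_CACHE(__cache)	((__cache) << ION_CACHE_SHIFT)

    /* Illustrative helper (not part of the patch): non-zero means the
     * caller asked for a cached mapping, zero means uncached. This is
     * the same test the heaps apply to the flags argument. */
    static inline int ion_mapping_is_cached(unsigned long flags)
    {
            return (flags & ION_SET_CACHE(CACHED)) != 0;
    }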