diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 37b23af0550..d4ca7625407 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -407,7 +407,8 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	return ret;
 }
 
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+		     unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	void *vaddr;
@@ -431,21 +432,38 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
 		return ERR_PTR(-ENODEV);
 	}
 
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			vaddr = ERR_PTR(-EEXIST);
+			goto out;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
+
 	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
-		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
+						      flags);
 		if (IS_ERR_OR_NULL(vaddr))
 			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
 		buffer->vaddr = vaddr;
 	} else {
 		vaddr = buffer->vaddr;
 	}
+
+out:
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 	return vaddr;
 }
 
 struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle)
+				struct ion_handle *handle,
+				unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	struct scatterlist *sglist;
@@ -467,6 +485,20 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
 		mutex_unlock(&client->lock);
 		return ERR_PTR(-ENODEV);
 	}
+
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			sglist = ERR_PTR(-EEXIST);
+			goto out;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
+
 	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
 		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
 		if (IS_ERR_OR_NULL(sglist))
@@ -475,6 +507,8 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
 	} else {
 		sglist = buffer->sglist;
 	}
+
+out:
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 	return sglist;
@@ -774,6 +808,9 @@ static int ion_share_release(struct inode *inode, struct file* file)
 	struct ion_buffer *buffer = file->private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&buffer->lock);
+	buffer->umap_cnt--;
+	mutex_unlock(&buffer->lock);
 	/* drop the reference to the buffer -- this prevents the buffer from
 	   going away because the client holding it exited while it was being
 	   passed */
@@ -840,6 +877,10 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 	struct ion_client *client;
 	struct ion_handle *handle;
 	int ret;
+	unsigned long flags = file->f_flags & O_DSYNC ?
+				ION_SET_CACHE(UNCACHED) :
+				ION_SET_CACHE(CACHED);
+
 	pr_debug("%s: %d\n", __func__, __LINE__);
 
 	/* make sure the client still exists, it's possible for the client to
@@ -875,13 +916,28 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	mutex_lock(&buffer->lock);
+	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
+		if (buffer->flags != flags) {
+			pr_err("%s: buffer was already mapped with flags %lx,"
+				" cannot map with flags %lx\n", __func__,
+				buffer->flags, flags);
+			ret = -EEXIST;
+			mutex_unlock(&buffer->lock);
+			goto err1;
+		}
+
+	} else {
+		buffer->flags = flags;
+	}
 	/* now map it to userspace */
-	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
+						flags);
+	buffer->umap_cnt++;
 	mutex_unlock(&buffer->lock);
 	if (ret) {
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
-		goto err1;
+		goto err2;
 	}
 
 	vma->vm_ops = &ion_vm_ops;
@@ -895,8 +951,10 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
 		 atomic_read(&buffer->ref.refcount));
 	return 0;
 
-err1:
+err2:
+	buffer->umap_cnt--;
 	/* drop the reference to the handle */
+err1:
 	ion_handle_put(handle);
 err:
 	/* drop the reference to the client */
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 494967711f3..a50e697b70d 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -97,10 +97,13 @@ void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
-				   struct ion_buffer *buffer)
+				   struct ion_buffer *buffer,
+				   unsigned long flags)
 {
-	return __arch_ioremap(buffer->priv_phys, buffer->size,
-			      MT_MEMORY_NONCACHED);
+	if (flags & ION_SET_CACHE(CACHED))
+		return ioremap_cached(buffer->priv_phys, buffer->size);
+	else
+		return ioremap(buffer->priv_phys, buffer->size);
 }
 
 void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
@@ -112,12 +115,18 @@ void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
 }
 
 int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			       struct vm_area_struct *vma)
+			       struct vm_area_struct *vma, unsigned long flags)
 {
-	return remap_pfn_range(vma, vma->vm_start,
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_pfn_range(vma, vma->vm_start,
 			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
 			       buffer->size,
-			       pgprot_noncached(vma->vm_page_prot));
+			       vma->vm_page_prot);
+	else
+		return remap_pfn_range(vma, vma->vm_start,
+			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+			       buffer->size,
+			       pgprot_noncached(vma->vm_page_prot));
 }
 
 static struct ion_heap_ops carveout_heap_ops = {
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 3323954c03a..581abe5ce6b 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -71,6 +71,7 @@ struct ion_buffer {
 	void *vaddr;
 	int dmap_cnt;
 	struct scatterlist *sglist;
+	int umap_cnt;
 };
 
 /**
@@ -95,10 +96,11 @@ struct ion_heap_ops {
 	struct scatterlist *(*map_dma) (struct ion_heap *heap,
 					struct ion_buffer *buffer);
 	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
-	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
+			      unsigned long flags);
 	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
-			 struct vm_area_struct *vma);
+			 struct vm_area_struct *vma,
+			 unsigned long flags);
 };
 
 /**
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index c046cf1a321..b34b4554193 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -75,9 +75,15 @@ void ion_system_heap_unmap_dma(struct ion_heap *heap,
 }
 
 void *ion_system_heap_map_kernel(struct ion_heap *heap,
-				 struct ion_buffer *buffer)
+				 struct ion_buffer *buffer,
+				 unsigned long flags)
 {
-	return buffer->priv_virt;
+	if (flags & ION_SET_CACHE(CACHED))
+		return buffer->priv_virt;
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
 }
 
 void ion_system_heap_unmap_kernel(struct ion_heap *heap,
@@ -86,9 +92,15 @@ void ion_system_heap_unmap_kernel(struct ion_heap *heap,
 }
 
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			     struct vm_area_struct *vma)
+			     struct vm_area_struct *vma, unsigned long flags)
 {
-	return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_vmalloc_range(vma, buffer->priv_virt,
+						vma->vm_pgoff);
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static struct ion_heap_ops vmalloc_ops = {
@@ -159,13 +171,19 @@ struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
 
 int ion_system_contig_heap_map_user(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
-				    struct vm_area_struct *vma)
+				    struct vm_area_struct *vma,
+				    unsigned long flags)
 {
 	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
-	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+
+	if (flags & ION_SET_CACHE(CACHED))
+		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);
-
+	else {
+		pr_err("%s: cannot map system heap uncached\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static struct ion_heap_ops kmalloc_ops = {
diff --git a/include/linux/ion.h b/include/linux/ion.h
index d6dcf3803f4..91cbbdab361 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -62,6 +62,13 @@ enum ion_heap_ids {
 #define ION_VMALLOC_HEAP_NAME	"vmalloc"
 #define ION_EBI1_HEAP_NAME	"EBI1"
 
+#define CACHED          1
+#define UNCACHED        0
+
+#define ION_CACHE_SHIFT 0
+
+#define ION_SET_CACHE(__cache)	((__cache) << ION_CACHE_SHIFT)
+
 #ifdef __KERNEL__
 struct ion_device;
 struct ion_heap;
@@ -183,11 +190,14 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client
  * @handle:	handle to map
+ * @flags:	flags for this mapping
  *
  * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address.
+ * can be used to access this address. If no flags are specified, this
+ * will return a non-secure uncached mapping.
  */
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags);
 
 /**
  * ion_unmap_kernel() - destroy a kernel mapping for a handle
@@ -204,7 +214,8 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
  *
  * Return an sglist describing the given handle
  */
 struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle);
+				struct ion_handle *handle,
+				unsigned long flags);
 
 /**
 * ion_unmap_dma() - destroy a dma mapping for a handle
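
Example usage of the new kernel interface. This is a minimal sketch, not part of the patch: the client, handle and example function name are assumed for illustration (a real caller would obtain them through the existing ION client/allocation APIs), and error handling is abbreviated.

#include <linux/err.h>
#include <linux/ion.h>

/*
 * Hypothetical kernel-side caller: map an existing ION buffer cached,
 * use it, then tear the mapping down.  Note that all concurrent
 * mappings of a buffer (kernel, dma and user) must request the same
 * cache flags, otherwise ion_map_kernel()/ion_map_dma() return -EEXIST.
 */
static int example_use_buffer_cached(struct ion_client *client,
				     struct ion_handle *handle)
{
	void *vaddr;

	/* Request a cached kernel mapping via the new flags argument. */
	vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(CACHED));
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	/* ... read or write the buffer through vaddr ... */

	ion_unmap_kernel(client, handle);
	return 0;
}

From userspace, the cache attributes of a shared buffer are selected by the O_DSYNC flag on the shared fd: an fd opened with O_DSYNC is mmapped uncached, otherwise cached, per the ion_share_mmap() change above.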