ion: Merge ion changes from msm-4.14 to msm-kona

This patch merges changes pertinent to ion from msm-4.14 to msm-kona.

Conflicts:
    include/linux/oom.h

Change-Id: I33239643d8eb5e98f6d7529ff986db03b043da2d
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
parent 4008eb493a
commit bbbc80b6d8
38 changed files with 4995 additions and 226 deletions
Documentation/devicetree/bindings/arm/msm/msm_ion.txt (new file, 59 lines)
@@ -0,0 +1,59 @@
ION Memory Manager (ION)

ION is a memory manager that allows for sharing of buffers between different
processes and between user space and kernel space. ION manages different
memory spaces by separating the memory spaces into "heaps".

Required properties for Ion

- compatible: "qcom,msm-ion"


All child nodes of a qcom,msm-ion node are interpreted as Ion heap
configurations.

Required properties for Ion heaps

- reg: The ID of the ION heap.
- qcom,ion-heap-type: The heap type to use for this heap. Should be one of
  the following:
    - "SYSTEM"
    - "CARVEOUT"
    - "DMA"
    - "HYP_CMA"
    - "SYSTEM_SECURE"
    - "SECURE_DMA"

Optional properties for Ion heaps

- memory-region: phandle to memory region associated with heap.

Example:
        qcom,ion {
                compatible = "qcom,msm-ion";
                #address-cells = <1>;
                #size-cells = <0>;

                system_heap: qcom,ion-heap@25 {
                        reg = <25>;
                        qcom,ion-heap-type = "SYSTEM";
                };

                qcom,ion-heap@22 { /* ADSP HEAP */
                        reg = <22>;
                        memory-region = <&adsp_mem>;
                        qcom,ion-heap-type = "DMA";
                };

                qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
                        reg = <10>;
                        memory-region = <&secure_display_memory>;
                        qcom,ion-heap-type = "HYP_CMA";
                };

                qcom,ion-heap@9 {
                        reg = <9>;
                        qcom,ion-heap-type = "SYSTEM_SECURE";
                };
        };
@@ -51,6 +51,8 @@ compatible (optional) - standard definition
          used as a shared pool of DMA buffers for a set of devices. It can
          be used by an operating system to instanciate the necessary pool
          management subsystem if necessary.
        - removed-dma-pool: This indicates a region of memory which is meant to
          be carved out and not exposed to kernel.
        - vendor specific string in the form <vendor>,[<device>-]<usage>
no-map (optional) - empty property
    - Indicates the operating system must not create a virtual mapping
arch/arm64/boot/dts/qcom/kona-ion.dtsi (new file, 22 lines)
@@ -0,0 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

&soc {
        qcom,ion {
                compatible = "qcom,msm-ion";
                #address-cells = <1>;
                #size-cells = <0>;

                system_heap: qcom,ion-heap@25 {
                        reg = <25>;
                        qcom,ion-heap-type = "SYSTEM";
                };

                system_secure_heap: qcom,ion-heap@9 {
                        reg = <9>;
                        qcom,ion-heap-type = "SYSTEM_SECURE";
                };
        };
};
@@ -348,4 +348,5 @@
        };
};

#include "kona-ion.dtsi"
#include "msm-arm-smmu-kona.dtsi"
@@ -34,9 +34,13 @@
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/atomic.h>

#include <uapi/linux/dma-buf.h>

static atomic_long_t name_counter;

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
@@ -77,6 +81,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
    reservation_object_fini(dmabuf->resv);

    module_put(dmabuf->owner);
    kfree(dmabuf->name);
    kfree(dmabuf);
    return 0;
}
@@ -276,12 +281,19 @@ out:
    return events;
}

static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
                                            enum dma_data_direction direction);

static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
                                          enum dma_data_direction direction);

static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
    struct dma_buf *dmabuf;
    struct dma_buf_sync sync;
    enum dma_data_direction direction;
    enum dma_data_direction dir;
    int ret;

    dmabuf = file->private_data;
@@ -296,22 +308,30 @@ static long dma_buf_ioctl(struct file *file,

    switch (sync.flags & DMA_BUF_SYNC_RW) {
    case DMA_BUF_SYNC_READ:
        direction = DMA_FROM_DEVICE;
        dir = DMA_FROM_DEVICE;
        break;
    case DMA_BUF_SYNC_WRITE:
        direction = DMA_TO_DEVICE;
        dir = DMA_TO_DEVICE;
        break;
    case DMA_BUF_SYNC_RW:
        direction = DMA_BIDIRECTIONAL;
        dir = DMA_BIDIRECTIONAL;
        break;
    default:
        return -EINVAL;
    }

    if (sync.flags & DMA_BUF_SYNC_END)
        ret = dma_buf_end_cpu_access(dmabuf, direction);
        if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
            ret = dma_buf_end_cpu_access_umapped(dmabuf,
                                                 dir);
        else
            ret = dma_buf_end_cpu_access(dmabuf, dir);
    else
        ret = dma_buf_begin_cpu_access(dmabuf, direction);
        if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
            ret = dma_buf_begin_cpu_access_umapped(dmabuf,
                                                   dir);
        else
            ret = dma_buf_begin_cpu_access(dmabuf, dir);

        return ret;
    default:
@@ -392,7 +412,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
    struct reservation_object *resv = exp_info->resv;
    struct file *file;
    size_t alloc_size = sizeof(struct dma_buf);
    char *bufname;
    int ret;
    long cnt;

    if (!exp_info->resv)
        alloc_size += sizeof(struct reservation_object);
@@ -414,10 +436,17 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
    if (!try_module_get(exp_info->owner))
        return ERR_PTR(-ENOENT);

    cnt = atomic_long_inc_return(&name_counter);
    bufname = kasprintf(GFP_KERNEL, "dmabuf%ld", cnt);
    if (!bufname) {
        ret = -ENOMEM;
        goto err_module;
    }

    dmabuf = kzalloc(alloc_size, GFP_KERNEL);
    if (!dmabuf) {
        ret = -ENOMEM;
        goto err_module;
        goto err_name;
    }

    dmabuf->priv = exp_info->priv;
@@ -428,6 +457,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
    init_waitqueue_head(&dmabuf->poll);
    dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
    dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
    dmabuf->name = bufname;

    if (!resv) {
        resv = (struct reservation_object *)&dmabuf[1];
@@ -435,7 +465,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
    }
    dmabuf->resv = resv;

    file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
    file = anon_inode_getfile(bufname, &dma_buf_fops, dmabuf,
                              exp_info->flags);
    if (IS_ERR(file)) {
        ret = PTR_ERR(file);
@@ -456,6 +486,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)

err_dmabuf:
    kfree(dmabuf);
err_name:
    kfree(bufname);
err_module:
    module_put(exp_info->owner);
    return ERR_PTR(ret);
@@ -746,7 +778,8 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *   to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *   want (with the new data being consumed by say the GPU or the scanout
 *   device)
 *   device). Optionally SYNC_USER_MAPPED can be set to restrict cache
 *   maintenance to only the parts of the buffer which are mmap(ed).
 * - munmap once you don't need the buffer any more
 *
 * For correctness and optimal performance, it is always required to use
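[Editor's note] The comment above describes the userspace contract for CPU access to an mmap()ed dma-buf. Below is a minimal, hedged sketch of that SYNC_START/SYNC_END cycle; it uses only the standard uapi flags, and assumes the vendor DMA_BUF_SYNC_USER_MAPPED flag added by this patch would simply be OR'd into sync.flags where mmap-only cache maintenance is wanted.

/*
 * Hedged userspace sketch of the drawing/upload cycle described above.
 * DMA_BUF_SYNC_START/END/RW come from the standard <linux/dma-buf.h> uapi;
 * DMA_BUF_SYNC_USER_MAPPED is the vendor extension from this patch and, if
 * present in the uapi headers, could be OR'd into sync.flags (assumption).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int cpu_write_cycle(int buf_fd, void *map, size_t len)
{
    struct dma_buf_sync sync = { 0 };

    /* 1. Take CPU ownership before touching the mmap()ed area. */
    sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
    if (ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
        return -1;

    memset(map, 0, len);    /* 2. CPU read/write through the mapping. */

    /* 3. Hand the buffer back; the kernel does any needed cache work. */
    sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
    if (ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
        return -1;

    return 0;
}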
@@ -833,6 +866,50 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
                                            enum dma_data_direction direction)
{
    int ret = 0;

    if (WARN_ON(!dmabuf))
        return -EINVAL;

    if (dmabuf->ops->begin_cpu_access_umapped)
        ret = dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction);

    /* Ensure that all fences are waited upon - but we first allow
     * the native handler the chance to do so more efficiently if it
     * chooses. A double invocation here will be reasonably cheap no-op.
     */
    if (ret == 0)
        ret = __dma_buf_begin_cpu_access(dmabuf, direction);

    return ret;
}
int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
                                     enum dma_data_direction direction,
                                     unsigned int offset, unsigned int len)
{
    int ret = 0;

    if (WARN_ON(!dmabuf))
        return -EINVAL;

    if (dmabuf->ops->begin_cpu_access_partial)
        ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
                                                    offset, len);

    /* Ensure that all fences are waited upon - but we first allow
     * the native handler the chance to do so more efficiently if it
     * chooses. A double invocation here will be reasonably cheap no-op.
     */
    if (ret == 0)
        ret = __dma_buf_begin_cpu_access(dmabuf, direction);

    return ret;
}
EXPORT_SYMBOL(dma_buf_begin_cpu_access_partial);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
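[Editor's note] The new *_partial helpers above only forward to the exporter (and, for the begin path, wait for fences); the patch itself does not show how an importer is meant to call them. A hedged kernel-side sketch, assuming a hypothetical driver that only needs CPU access to the first 4 KiB of an imported buffer:

/*
 * Hedged sketch (not from the patch): bracket a partial CPU read of an
 * imported dma-buf with the partial CPU-access helpers added above.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/sizes.h>

static int my_driver_peek_header(struct dma_buf *dmabuf)
{
    int ret;

    ret = dma_buf_begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE, 0, SZ_4K);
    if (ret)
        return ret;

    /* ... kmap/vmap the first page here and parse the header ... */

    return dma_buf_end_cpu_access_partial(dmabuf, DMA_FROM_DEVICE, 0, SZ_4K);
}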
@@ -859,6 +936,35 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
                                   enum dma_data_direction direction)
{
    int ret = 0;

    WARN_ON(!dmabuf);

    if (dmabuf->ops->end_cpu_access_umapped)
        ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction);

    return ret;
}

int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
                                   enum dma_data_direction direction,
                                   unsigned int offset, unsigned int len)
{
    int ret = 0;

    WARN_ON(!dmabuf);

    if (dmabuf->ops->end_cpu_access_partial)
        ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
                                                  offset, len);

    return ret;
}
EXPORT_SYMBOL(dma_buf_end_cpu_access_partial);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
@@ -1053,6 +1159,20 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
{
    int ret = 0;

    if (WARN_ON(!dmabuf))
        return -EINVAL;

    if (dmabuf->ops->get_flags)
        ret = dmabuf->ops->get_flags(dmabuf, flags);

    return ret;
}
EXPORT_SYMBOL(dma_buf_get_flags);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
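[Editor's note] dma_buf_get_flags() gives kernel importers a way to ask the exporter for its buffer flags. A hedged sketch of testing the ION cached flag on an ION-exported buffer; the ION_FLAG_CACHED value is an assumption taken from the ION uapi header rather than from this patch.

/* Hedged sketch: query exporter flags and test the ION cached bit. */
#include <linux/dma-buf.h>
#include <linux/types.h>

#ifndef ION_FLAG_CACHED
#define ION_FLAG_CACHED 1    /* assumed value, normally from the ION uapi header */
#endif

static bool my_buf_is_cached(struct dma_buf *dmabuf)
{
    unsigned long flags = 0;

    /* Non-zero only if the exporter's ->get_flags callback itself failed. */
    if (dma_buf_get_flags(dmabuf, &flags))
        return false;

    return !!(flags & ION_FLAG_CACHED);
}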
@@ -1072,8 +1192,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
        return ret;

    seq_puts(s, "\nDma-buf Objects:\n");
    seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
               "size", "flags", "mode", "count");
    seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-12s\t%-s\n",
               "size", "flags", "mode", "count", "exp_name", "buf name");

    list_for_each_entry(buf_obj, &db_list.head, list_node) {
        ret = mutex_lock_interruptible(&buf_obj->lock);
@@ -1084,11 +1204,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
            continue;
        }

        seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
        seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-12s\t%-s\n",
                   buf_obj->size,
                   buf_obj->file->f_flags, buf_obj->file->f_mode,
                   file_count(buf_obj->file),
                   buf_obj->exp_name);
                   buf_obj->exp_name, buf_obj->name);

        robj = buf_obj->resv;
        while (true) {
@@ -20,6 +20,7 @@
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

const struct of_device_id of_default_bus_match_table[] = {
@@ -188,6 +189,7 @@ static struct platform_device *of_platform_device_create_pdata(
    dev->dev.bus = &platform_bus_type;
    dev->dev.platform_data = platform_data;
    of_msi_configure(&dev->dev, dev->dev.of_node);
    of_reserved_mem_device_init_by_idx(&dev->dev, dev->dev.of_node, 0);

    if (of_device_add(dev) != 0) {
        platform_device_put(dev);
@@ -3,6 +3,7 @@ menuconfig ION
    depends on HAVE_MEMBLOCK && HAS_DMA && MMU
    select GENERIC_ALLOCATOR
    select DMA_SHARED_BUFFER
    select MSM_SECURE_BUFFER
    help
      Choose this option to enable the ION Memory Manager,
      used by Android to efficiently allocate buffers
@@ -42,3 +43,15 @@ config ION_CMA_HEAP
      Choose this option to enable CMA heaps with Ion. This heap is backed
      by the Contiguous Memory Allocator (CMA). If your system has these
      regions, you should say Y here.

config ION_FORCE_DMA_SYNC
    bool "Force ION to always DMA sync buffer memory"
    depends on ION
    help
      Force ION to DMA sync buffer memory when it is allocated and to
      always DMA sync the buffer memory on calls to begin/end cpu
      access. This makes ION DMA sync behavior similar to that of the
      older version of ION.
      We generally don't want to enable this config as it breaks the
      cache maintenance model.
      If you're not sure say N here.
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
                     ion_page_pool.o ion_system_heap.o \
                     ion_carveout_heap.o ion_chunk_heap.o \
                     ion_system_secure_heap.o ion_cma_heap.o \
                     ion_secure_util.o ion_cma_secure_heap.o msm/
@@ -9,10 +9,12 @@
#include <linux/uaccess.h>

#include "ion.h"
#include "ion_system_secure_heap.h"

union ion_ioctl_arg {
    struct ion_allocation_data allocation;
    struct ion_heap_query query;
    struct ion_prefetch_data prefetch_data;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
@@ -73,9 +75,9 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        int fd;

        fd = ion_alloc(data.allocation.len,
                       data.allocation.heap_id_mask,
                       data.allocation.flags);
        fd = ion_alloc_fd(data.allocation.len,
                          data.allocation.heap_id_mask,
                          data.allocation.flags);
        if (fd < 0)
            return fd;

@@ -86,6 +88,33 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    case ION_IOC_HEAP_QUERY:
        ret = ion_query_heaps(&data.query);
        break;
    case ION_IOC_PREFETCH:
    {
        int ret;

        ret = ion_walk_heaps(data.prefetch_data.heap_id,
                             (enum ion_heap_type)
                             ION_HEAP_TYPE_SYSTEM_SECURE,
                             (void *)&data.prefetch_data,
                             ion_system_secure_heap_prefetch);
        if (ret)
            return ret;
        break;
    }
    case ION_IOC_DRAIN:
    {
        int ret;

        ret = ion_walk_heaps(data.prefetch_data.heap_id,
                             (enum ion_heap_type)
                             ION_HEAP_TYPE_SYSTEM_SECURE,
                             (void *)&data.prefetch_data,
                             ion_system_secure_heap_drain);

        if (ret)
            return ret;
        break;
    }
    default:
        return -ENOTTY;
    }
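[Editor's note] For reference, the ION_IOC_ALLOC path handled above corresponds to a small amount of userspace code. A hedged sketch using the post-4.12 staging ION uapi follows; the header location and the heap id are assumptions (heap id 25 mirrors the SYSTEM heap declared in kona-ion.dtsi above), and real clients should discover heap ids with ION_IOC_HEAP_QUERY.

/*
 * Hedged userspace sketch of ION_IOC_ALLOC. The uapi header ships with the
 * staging driver (path may differ per tree); heap id 25 is assumed from the
 * kona devicetree earlier in this patch.
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>

int alloc_ion_dmabuf_fd(size_t len)
{
    struct ion_allocation_data alloc = {
        .len = len,
        .heap_id_mask = 1u << 25,    /* assumed SYSTEM heap id */
        .flags = ION_FLAG_CACHED,
    };
    int ion_fd = open("/dev/ion", O_RDWR | O_CLOEXEC);
    int ret;

    if (ion_fd < 0)
        return -1;

    ret = ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
    close(ion_fd);        /* the returned dma-buf fd stays valid */

    return ret < 0 ? -1 : (int)alloc.fd;
}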
@ -3,6 +3,8 @@
|
|||
* drivers/staging/android/ion/ion.c
|
||||
*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/anon_inodes.h>
|
||||
|
@ -27,11 +29,44 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/msm_dma_iommu_mapping.h>
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/ion.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
|
||||
#include "ion.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
static struct ion_device *internal_dev;
|
||||
static int heap_id;
|
||||
|
||||
int ion_walk_heaps(int heap_id, enum ion_heap_type type, void *data,
|
||||
int (*f)(struct ion_heap *heap, void *data))
|
||||
{
|
||||
int ret_val = 0;
|
||||
struct ion_heap *heap;
|
||||
struct ion_device *dev = internal_dev;
|
||||
/*
|
||||
* traverse the list of heaps available in this system
|
||||
* and find the heap that is specified.
|
||||
*/
|
||||
down_write(&dev->lock);
|
||||
plist_for_each_entry(heap, &dev->heaps, node) {
|
||||
if (ION_HEAP(heap->id) != heap_id ||
|
||||
type != heap->type)
|
||||
continue;
|
||||
ret_val = f(heap, data);
|
||||
break;
|
||||
}
|
||||
up_write(&dev->lock);
|
||||
return ret_val;
|
||||
}
|
||||
EXPORT_SYMBOL(ion_walk_heaps);
|
||||
|
||||
bool ion_buffer_cached(struct ion_buffer *buffer)
|
||||
{
|
||||
return !!(buffer->flags & ION_FLAG_CACHED);
|
||||
}
|
||||
|
||||
/* this function should only be called while dev->lock is held */
|
||||
static void ion_buffer_add(struct ion_device *dev,
|
||||
|
@ -66,6 +101,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
|
|||
unsigned long flags)
|
||||
{
|
||||
struct ion_buffer *buffer;
|
||||
struct sg_table *table;
|
||||
int ret;
|
||||
|
||||
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
|
||||
|
@ -95,11 +131,32 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
|
|||
goto err1;
|
||||
}
|
||||
|
||||
table = buffer->sg_table;
|
||||
INIT_LIST_HEAD(&buffer->attachments);
|
||||
INIT_LIST_HEAD(&buffer->vmas);
|
||||
mutex_init(&buffer->lock);
|
||||
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
int i;
|
||||
struct scatterlist *sg;
|
||||
|
||||
/*
|
||||
* this will set up dma addresses for the sglist -- it is not
|
||||
* technically correct as per the dma api -- a specific
|
||||
* device isn't really taking ownership here. However, in
|
||||
* practice on our systems the only dma_address space is
|
||||
* physical addresses.
|
||||
*/
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
sg_dma_address(sg) = sg_phys(sg);
|
||||
sg_dma_len(sg) = sg->length;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_lock(&dev->buffer_lock);
|
||||
ion_buffer_add(dev, buffer);
|
||||
mutex_unlock(&dev->buffer_lock);
|
||||
atomic_long_add(len, &heap->total_allocated);
|
||||
return buffer;
|
||||
|
||||
err1:
|
||||
|
@ -125,10 +182,13 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
|
|||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_device *dev = buffer->dev;
|
||||
|
||||
msm_dma_buf_freed(buffer);
|
||||
|
||||
mutex_lock(&dev->buffer_lock);
|
||||
rb_erase(&buffer->node, &dev->buffers);
|
||||
mutex_unlock(&dev->buffer_lock);
|
||||
|
||||
atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
|
||||
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
|
||||
ion_heap_freelist_add(heap, buffer);
|
||||
else
|
||||
|
@ -156,6 +216,11 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
|
|||
|
||||
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
|
||||
{
|
||||
if (buffer->kmap_cnt == 0) {
|
||||
WARN(1, "Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access\n");
|
||||
return;
|
||||
}
|
||||
|
||||
buffer->kmap_cnt--;
|
||||
if (!buffer->kmap_cnt) {
|
||||
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
|
||||
|
@ -182,7 +247,8 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
|
|||
new_sg = new_table->sgl;
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
memcpy(new_sg, sg, sizeof(*sg));
|
||||
new_sg->dma_address = 0;
|
||||
sg_dma_address(new_sg) = 0;
|
||||
sg_dma_len(new_sg) = 0;
|
||||
new_sg = sg_next(new_sg);
|
||||
}
|
||||
|
||||
|
@ -199,10 +265,11 @@ struct ion_dma_buf_attachment {
|
|||
struct device *dev;
|
||||
struct sg_table *table;
|
||||
struct list_head list;
|
||||
bool dma_mapped;
|
||||
};
|
||||
|
||||
static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
|
||||
struct dma_buf_attachment *attachment)
|
||||
struct dma_buf_attachment *attachment)
|
||||
{
|
||||
struct ion_dma_buf_attachment *a;
|
||||
struct sg_table *table;
|
||||
|
@ -220,6 +287,7 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
|
|||
|
||||
a->table = table;
|
||||
a->dev = dev;
|
||||
a->dma_mapped = false;
|
||||
INIT_LIST_HEAD(&a->list);
|
||||
|
||||
attachment->priv = a;
|
||||
|
@ -237,8 +305,8 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
|
|||
struct ion_dma_buf_attachment *a = attachment->priv;
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
free_duped_table(a->table);
|
||||
mutex_lock(&buffer->lock);
|
||||
free_duped_table(a->table);
|
||||
list_del(&a->list);
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
|
@ -250,13 +318,44 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
|
|||
{
|
||||
struct ion_dma_buf_attachment *a = attachment->priv;
|
||||
struct sg_table *table;
|
||||
struct ion_buffer *buffer = attachment->dmabuf->priv;
|
||||
|
||||
table = a->table;
|
||||
|
||||
if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
|
||||
direction))
|
||||
return ERR_PTR(-ENOMEM);
|
||||
map_attrs = attachment->dma_map_attrs;
|
||||
if (!(buffer->flags & ION_FLAG_CACHED) ||
|
||||
!hlos_accessible_buffer(buffer))
|
||||
map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
|
||||
trace_ion_dma_map_cmo_skip(attachment->dev,
|
||||
attachment->dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
else
|
||||
trace_ion_dma_map_cmo_apply(attachment->dev,
|
||||
attachment->dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
|
||||
if (map_attrs & DMA_ATTR_DELAYED_UNMAP) {
|
||||
count = msm_dma_map_sg_attrs(attachment->dev, table->sgl,
|
||||
table->nents, direction,
|
||||
attachment->dmabuf, map_attrs);
|
||||
} else {
|
||||
count = dma_map_sg_attrs(attachment->dev, table->sgl,
|
||||
table->nents, direction,
|
||||
map_attrs);
|
||||
}
|
||||
|
||||
|
||||
a->dma_mapped = true;
|
||||
mutex_unlock(&buffer->lock);
|
||||
return table;
|
||||
}
|
||||
|
||||
|
@ -264,9 +363,94 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
|||
struct sg_table *table,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
|
||||
int map_attrs;
|
||||
struct ion_buffer *buffer = attachment->dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a = attachment->priv;
|
||||
|
||||
map_attrs = attachment->dma_map_attrs;
|
||||
if (!(buffer->flags & ION_FLAG_CACHED) ||
|
||||
!hlos_accessible_buffer(buffer))
|
||||
map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC)
|
||||
trace_ion_dma_unmap_cmo_skip(attachment->dev,
|
||||
attachment->dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
else
|
||||
trace_ion_dma_unmap_cmo_apply(attachment->dev,
|
||||
attachment->dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
hlos_accessible_buffer(buffer),
|
||||
attachment->dma_map_attrs,
|
||||
direction);
|
||||
|
||||
if (map_attrs & DMA_ATTR_DELAYED_UNMAP)
|
||||
msm_dma_unmap_sg_attrs(attachment->dev, table->sgl,
|
||||
table->nents, direction,
|
||||
attachment->dmabuf,
|
||||
map_attrs);
|
||||
else
|
||||
dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
|
||||
direction, map_attrs);
|
||||
a->dma_mapped = false;
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
void ion_pages_sync_for_device(struct device *dev, struct page *page,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist sg;
|
||||
|
||||
sg_init_table(&sg, 1);
|
||||
sg_set_page(&sg, page, size, 0);
|
||||
/*
|
||||
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
|
||||
* for the targeted device, but this works on the currently targeted
|
||||
* hardware.
|
||||
*/
|
||||
sg_dma_address(&sg) = page_to_phys(page);
|
||||
dma_sync_sg_for_device(dev, &sg, 1, dir);
|
||||
}
|
||||
|
||||
static void ion_vm_open(struct vm_area_struct *vma)
|
||||
{
|
||||
struct ion_buffer *buffer = vma->vm_private_data;
|
||||
struct ion_vma_list *vma_list;
|
||||
|
||||
vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
|
||||
if (!vma_list)
|
||||
return;
|
||||
vma_list->vma = vma;
|
||||
mutex_lock(&buffer->lock);
|
||||
list_add(&vma_list->list, &buffer->vmas);
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
static void ion_vm_close(struct vm_area_struct *vma)
|
||||
{
|
||||
struct ion_buffer *buffer = vma->vm_private_data;
|
||||
struct ion_vma_list *vma_list, *tmp;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
|
||||
if (vma_list->vma != vma)
|
||||
continue;
|
||||
list_del(&vma_list->list);
|
||||
kfree(vma_list);
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct ion_vma_ops = {
|
||||
.open = ion_vm_open,
|
||||
.close = ion_vm_close,
|
||||
};
|
||||
|
||||
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
@ -281,6 +465,10 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
|
|||
if (!(buffer->flags & ION_FLAG_CACHED))
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
|
||||
vma->vm_private_data = buffer;
|
||||
vma->vm_ops = &ion_vma_ops;
|
||||
ion_vm_open(vma);
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
/* now map it to userspace */
|
||||
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
|
||||
|
@ -304,6 +492,7 @@ static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
|
|||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_kmap\n");
|
||||
return buffer->vaddr + offset * PAGE_SIZE;
|
||||
}
|
||||
|
||||
|
@ -312,14 +501,111 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
|
|||
{
|
||||
}
|
||||
|
||||
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction)
|
||||
static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_vmap\n");
|
||||
return buffer->vaddr;
|
||||
}
|
||||
|
||||
static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
|
||||
{
|
||||
}
|
||||
|
||||
static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
|
||||
unsigned int nents, unsigned long offset,
|
||||
unsigned long length,
|
||||
enum dma_data_direction dir, bool for_cpu)
|
||||
{
|
||||
int i;
|
||||
struct scatterlist *sg;
|
||||
unsigned int len = 0;
|
||||
dma_addr_t sg_dma_addr;
|
||||
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
if (sg_dma_len(sg) == 0)
|
||||
break;
|
||||
|
||||
if (i > 0) {
|
||||
pr_warn("Partial cmo only supported with 1 segment\n"
|
||||
"is dma_set_max_seg_size being set on dev:%s\n",
|
||||
dev_name(dev));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
unsigned int sg_offset, sg_left, size = 0;
|
||||
|
||||
if (i == 0)
|
||||
sg_dma_addr = sg_dma_address(sg);
|
||||
|
||||
len += sg->length;
|
||||
if (len <= offset)
|
||||
continue;
|
||||
|
||||
sg_left = len - offset;
|
||||
sg_offset = sg->length - sg_left;
|
||||
|
||||
size = (length < sg_left) ? length : sg_left;
|
||||
if (for_cpu)
|
||||
dma_sync_single_range_for_cpu(dev, sg_dma_addr,
|
||||
sg_offset, size, dir);
|
||||
else
|
||||
dma_sync_single_range_for_device(dev, sg_dma_addr,
|
||||
sg_offset, size, dir);
|
||||
|
||||
offset += size;
|
||||
length -= size;
|
||||
sg_dma_addr += sg->length;
|
||||
|
||||
if (length == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ion_sgl_sync_mapped(struct device *dev, struct scatterlist *sgl,
|
||||
unsigned int nents, struct list_head *vmas,
|
||||
enum dma_data_direction dir, bool for_cpu)
|
||||
{
|
||||
struct ion_vma_list *vma_list;
|
||||
int ret = 0;
|
||||
|
||||
list_for_each_entry(vma_list, vmas, list) {
|
||||
struct vm_area_struct *vma = vma_list->vma;
|
||||
|
||||
ret = ion_sgl_sync_range(dev, sgl, nents,
|
||||
vma->vm_pgoff * PAGE_SIZE,
|
||||
vma->vm_end - vma->vm_start, dir,
|
||||
for_cpu);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction,
|
||||
bool sync_only_mapped)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
void *vaddr;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
false, direction,
|
||||
sync_only_mapped);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Move this elsewhere because we don't always need a vaddr
|
||||
*/
|
||||
|
@ -333,22 +619,100 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
|
|||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
|
||||
true, direction,
|
||||
sync_only_mapped);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = buffer->heap->priv;
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
if (sync_only_mapped)
|
||||
ret = ion_sgl_sync_mapped(dev, table->sgl,
|
||||
table->nents, &buffer->vmas,
|
||||
direction, true);
|
||||
else
|
||||
dma_sync_sg_for_cpu(dev, table->sgl,
|
||||
table->nents, direction);
|
||||
|
||||
if (!ret)
|
||||
trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
else
|
||||
trace_ion_begin_cpu_access_cmo_skip(dev, dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
|
||||
direction);
|
||||
int tmp = 0;
|
||||
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_begin_cpu_access_notmapped(a->dev,
|
||||
dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (sync_only_mapped)
|
||||
tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl,
|
||||
a->table->nents,
|
||||
&buffer->vmas,
|
||||
direction, true);
|
||||
else
|
||||
dma_sync_sg_for_cpu(a->dev, a->table->sgl,
|
||||
a->table->nents, direction);
|
||||
|
||||
if (!tmp) {
|
||||
trace_ion_begin_cpu_access_cmo_apply(a->dev,
|
||||
dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
} else {
|
||||
trace_ion_begin_cpu_access_cmo_skip(a->dev,
|
||||
dmabuf->name, true,
|
||||
true, direction,
|
||||
sync_only_mapped);
|
||||
ret = tmp;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&buffer->lock);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction)
|
||||
static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction,
|
||||
bool sync_only_mapped)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
false, direction,
|
||||
sync_only_mapped);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (buffer->heap->ops->map_kernel) {
|
||||
mutex_lock(&buffer->lock);
|
||||
|
@ -356,13 +720,282 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
|
|||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
|
||||
true, direction,
|
||||
sync_only_mapped);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = buffer->heap->priv;
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
if (sync_only_mapped)
|
||||
ret = ion_sgl_sync_mapped(dev, table->sgl,
|
||||
table->nents, &buffer->vmas,
|
||||
direction, false);
|
||||
else
|
||||
dma_sync_sg_for_device(dev, table->sgl,
|
||||
table->nents, direction);
|
||||
|
||||
if (!ret)
|
||||
trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
else
|
||||
trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->name,
|
||||
true, true, direction,
|
||||
sync_only_mapped);
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
|
||||
direction);
|
||||
int tmp = 0;
|
||||
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_end_cpu_access_notmapped(a->dev,
|
||||
dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (sync_only_mapped)
|
||||
tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl,
|
||||
a->table->nents,
|
||||
&buffer->vmas, direction,
|
||||
false);
|
||||
else
|
||||
dma_sync_sg_for_device(a->dev, a->table->sgl,
|
||||
a->table->nents, direction);
|
||||
|
||||
if (!tmp) {
|
||||
trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
sync_only_mapped);
|
||||
} else {
|
||||
trace_ion_end_cpu_access_cmo_skip(a->dev, dmabuf->name,
|
||||
true, true, direction,
|
||||
sync_only_mapped);
|
||||
ret = tmp;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
return __ion_dma_buf_begin_cpu_access(dmabuf, direction, false);
|
||||
}
|
||||
|
||||
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
return __ion_dma_buf_end_cpu_access(dmabuf, direction, false);
|
||||
}
|
||||
|
||||
static int ion_dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
return __ion_dma_buf_begin_cpu_access(dmabuf, dir, true);
|
||||
}
|
||||
|
||||
static int ion_dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
return __ion_dma_buf_end_cpu_access(dmabuf, dir, true);
|
||||
}
|
||||
|
||||
static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction dir,
|
||||
unsigned int offset,
|
||||
unsigned int len)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
void *vaddr;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
false, dir,
|
||||
false);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Move this elsewhere because we don't always need a vaddr
|
||||
*/
|
||||
if (buffer->heap->ops->map_kernel) {
|
||||
mutex_lock(&buffer->lock);
|
||||
vaddr = ion_buffer_kmap_get(buffer);
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
|
||||
true, dir,
|
||||
false);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = buffer->heap->priv;
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
|
||||
offset, len, dir, true);
|
||||
|
||||
if (!ret)
|
||||
trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
|
||||
true, true, dir,
|
||||
false);
|
||||
else
|
||||
trace_ion_begin_cpu_access_cmo_skip(dev, dmabuf->name,
|
||||
true, true, dir,
|
||||
false);
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
int tmp = 0;
|
||||
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_begin_cpu_access_notmapped(a->dev,
|
||||
dmabuf->name,
|
||||
true, true,
|
||||
dir,
|
||||
false);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
|
||||
offset, len, dir, true);
|
||||
|
||||
if (!tmp) {
|
||||
trace_ion_begin_cpu_access_cmo_apply(a->dev,
|
||||
dmabuf->name,
|
||||
true, true, dir,
|
||||
false);
|
||||
} else {
|
||||
trace_ion_begin_cpu_access_cmo_skip(a->dev,
|
||||
dmabuf->name,
|
||||
true, true, dir,
|
||||
false);
|
||||
ret = tmp;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction direction,
|
||||
unsigned int offset,
|
||||
unsigned int len)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
struct ion_dma_buf_attachment *a;
|
||||
int ret = 0;
|
||||
|
||||
if (!hlos_accessible_buffer(buffer)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name,
|
||||
ion_buffer_cached(buffer),
|
||||
false, direction,
|
||||
false);
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (buffer->heap->ops->map_kernel) {
|
||||
mutex_lock(&buffer->lock);
|
||||
ion_buffer_kmap_put(buffer);
|
||||
mutex_unlock(&buffer->lock);
|
||||
}
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED)) {
|
||||
trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
|
||||
true, direction,
|
||||
false);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
|
||||
struct device *dev = buffer->heap->priv;
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
|
||||
ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
|
||||
offset, len, direction, false);
|
||||
|
||||
if (!ret)
|
||||
trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
|
||||
true, true,
|
||||
direction, false);
|
||||
else
|
||||
trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->name,
|
||||
true, true,
|
||||
direction, false);
|
||||
|
||||
mutex_unlock(&buffer->lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(a, &buffer->attachments, list) {
|
||||
int tmp = 0;
|
||||
|
||||
if (!a->dma_mapped) {
|
||||
trace_ion_end_cpu_access_notmapped(a->dev,
|
||||
dmabuf->name,
|
||||
true, true,
|
||||
direction,
|
||||
false);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
|
||||
offset, len, direction, false);
|
||||
|
||||
if (!tmp) {
|
||||
trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
|
||||
true, true,
|
||||
direction, false);
|
||||
|
||||
} else {
|
||||
trace_ion_end_cpu_access_cmo_skip(a->dev, dmabuf->name,
|
||||
true, true, direction,
|
||||
false);
|
||||
ret = tmp;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&buffer->lock);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ion_dma_buf_get_flags(struct dma_buf *dmabuf,
|
||||
unsigned long *flags)
|
||||
{
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
*flags = buffer->flags;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -375,19 +1008,26 @@ static const struct dma_buf_ops dma_buf_ops = {
|
|||
.detach = ion_dma_buf_detatch,
|
||||
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
|
||||
.end_cpu_access = ion_dma_buf_end_cpu_access,
|
||||
.begin_cpu_access_umapped = ion_dma_buf_begin_cpu_access_umapped,
|
||||
.end_cpu_access_umapped = ion_dma_buf_end_cpu_access_umapped,
|
||||
.begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial,
|
||||
.end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial,
|
||||
.map_atomic = ion_dma_buf_kmap,
|
||||
.unmap_atomic = ion_dma_buf_kunmap,
|
||||
.map = ion_dma_buf_kmap,
|
||||
.unmap = ion_dma_buf_kunmap,
|
||||
.vmap = ion_dma_buf_vmap,
|
||||
.vunmap = ion_dma_buf_vunmap,
|
||||
.get_flags = ion_dma_buf_get_flags,
|
||||
};
|
||||
|
||||
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
|
||||
struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct ion_device *dev = internal_dev;
|
||||
struct ion_buffer *buffer = NULL;
|
||||
struct ion_heap *heap;
|
||||
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
|
||||
int fd;
|
||||
struct dma_buf *dmabuf;
|
||||
|
||||
pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
|
||||
|
@ -401,7 +1041,7 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
|
|||
len = PAGE_ALIGN(len);
|
||||
|
||||
if (!len)
|
||||
return -EINVAL;
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
down_read(&dev->lock);
|
||||
plist_for_each_entry(heap, &dev->heaps, node) {
|
||||
|
@ -415,10 +1055,10 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
|
|||
up_read(&dev->lock);
|
||||
|
||||
if (!buffer)
|
||||
return -ENODEV;
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
if (IS_ERR(buffer))
|
||||
return PTR_ERR(buffer);
|
||||
return ERR_CAST(buffer);
|
||||
|
||||
exp_info.ops = &dma_buf_ops;
|
||||
exp_info.size = buffer->size;
|
||||
|
@ -426,10 +1066,59 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
|
|||
exp_info.priv = buffer;
|
||||
|
||||
dmabuf = dma_buf_export(&exp_info);
|
||||
if (IS_ERR(dmabuf)) {
|
||||
if (IS_ERR(dmabuf))
|
||||
_ion_buffer_destroy(buffer);
|
||||
return PTR_ERR(dmabuf);
|
||||
|
||||
return dmabuf;
|
||||
}
|
||||
|
||||
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct ion_device *dev = internal_dev;
|
||||
struct ion_heap *heap;
|
||||
bool type_valid = false;
|
||||
|
||||
pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
|
||||
len, heap_id_mask, flags);
|
||||
/*
|
||||
* traverse the list of heaps available in this system in priority
|
||||
* order. Check the heap type is supported.
|
||||
*/
|
||||
|
||||
down_read(&dev->lock);
|
||||
plist_for_each_entry(heap, &dev->heaps, node) {
|
||||
/* if the caller didn't specify this heap id */
|
||||
if (!((1 << heap->id) & heap_id_mask))
|
||||
continue;
|
||||
if (heap->type == ION_HEAP_TYPE_SYSTEM ||
|
||||
heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA ||
|
||||
heap->type ==
|
||||
(enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE) {
|
||||
type_valid = true;
|
||||
} else {
|
||||
pr_warn("%s: heap type not supported, type:%d\n",
|
||||
__func__, heap->type);
|
||||
}
|
||||
break;
|
||||
}
|
||||
up_read(&dev->lock);
|
||||
|
||||
if (!type_valid)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
return ion_alloc_dmabuf(len, heap_id_mask, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(ion_alloc);
|
||||
|
||||
int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags)
|
||||
{
|
||||
int fd;
|
||||
struct dma_buf *dmabuf;
|
||||
|
||||
dmabuf = ion_alloc_dmabuf(len, heap_id_mask, flags);
|
||||
if (IS_ERR(dmabuf))
|
||||
return PTR_ERR(dmabuf);
|
||||
|
||||
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
|
||||
if (fd < 0)
|
||||
|
@ -461,7 +1150,7 @@ int ion_query_heaps(struct ion_heap_query *query)
|
|||
max_cnt = query->cnt;
|
||||
|
||||
plist_for_each_entry(heap, &dev->heaps, node) {
|
||||
strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
|
||||
strlcpy(hdata.name, heap->name, sizeof(hdata.name));
|
||||
hdata.name[sizeof(hdata.name) - 1] = '\0';
|
||||
hdata.type = heap->type;
|
||||
hdata.heap_id = heap->id;
|
||||
|
@ -526,9 +1215,8 @@ static int debug_shrink_get(void *data, u64 *val)
|
|||
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
|
||||
debug_shrink_set, "%llu\n");
|
||||
|
||||
void ion_device_add_heap(struct ion_heap *heap)
|
||||
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
|
||||
{
|
||||
struct ion_device *dev = internal_dev;
|
||||
int ret;
|
||||
|
||||
if (!heap->ops->allocate || !heap->ops->free)
|
||||
|
@ -549,7 +1237,6 @@ void ion_device_add_heap(struct ion_heap *heap)
|
|||
|
||||
heap->dev = dev;
|
||||
down_write(&dev->lock);
|
||||
heap->id = heap_id++;
|
||||
/*
|
||||
* use negative heap->id to reverse the priority -- when traversing
|
||||
* the list later attempt higher id numbers first
|
||||
|
@ -570,14 +1257,14 @@ void ion_device_add_heap(struct ion_heap *heap)
|
|||
}
|
||||
EXPORT_SYMBOL(ion_device_add_heap);
|
||||
|
||||
static int ion_device_create(void)
|
||||
struct ion_device *ion_device_create(void)
|
||||
{
|
||||
struct ion_device *idev;
|
||||
int ret;
|
||||
|
||||
idev = kzalloc(sizeof(*idev), GFP_KERNEL);
|
||||
if (!idev)
|
||||
return -ENOMEM;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
idev->dev.minor = MISC_DYNAMIC_MINOR;
|
||||
idev->dev.name = "ion";
|
||||
|
@ -587,7 +1274,7 @@ static int ion_device_create(void)
|
|||
if (ret) {
|
||||
pr_err("ion: failed to register misc device.\n");
|
||||
kfree(idev);
|
||||
return ret;
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
idev->debug_root = debugfs_create_dir("ion", NULL);
|
||||
|
@ -596,6 +1283,15 @@ static int ion_device_create(void)
|
|||
init_rwsem(&idev->lock);
|
||||
plist_head_init(&idev->heaps);
|
||||
internal_dev = idev;
|
||||
return 0;
|
||||
return idev;
|
||||
}
|
||||
subsys_initcall(ion_device_create);
|
||||
EXPORT_SYMBOL(ion_device_create);
|
||||
|
||||
void ion_device_destroy(struct ion_device *dev)
|
||||
{
|
||||
misc_deregister(&dev->dev);
|
||||
debugfs_remove_recursive(dev->debug_root);
|
||||
/* XXX need to free the heaps and clients ? */
|
||||
kfree(dev);
|
||||
}
|
||||
EXPORT_SYMBOL(ion_device_destroy);
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
* drivers/staging/android/ion/ion.h
|
||||
*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ION_H
|
||||
|
@ -18,8 +20,37 @@
|
|||
#include <linux/shrinker.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/miscdevice.h>
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include "ion_kernel.h"
|
||||
#include "../uapi/ion.h"
|
||||
#include "../uapi/msm_ion.h"
|
||||
|
||||
#define ION_ADSP_HEAP_NAME "adsp"
|
||||
#define ION_SYSTEM_HEAP_NAME "system"
|
||||
#define ION_MM_HEAP_NAME "mm"
|
||||
#define ION_SPSS_HEAP_NAME "spss"
|
||||
#define ION_QSECOM_HEAP_NAME "qsecom"
|
||||
#define ION_QSECOM_TA_HEAP_NAME "qsecom_ta"
|
||||
#define ION_SECURE_HEAP_NAME "secure_heap"
|
||||
#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
|
||||
#define ION_AUDIO_HEAP_NAME "audio"
|
||||
|
||||
#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED)
|
||||
|
||||
/**
|
||||
* Debug feature. Make ION allocations DMA
|
||||
* ready to help identify clients who are wrongly
|
||||
* dependending on ION allocations being DMA
|
||||
* ready.
|
||||
*
|
||||
* As default set to 'false' since ION allocations
|
||||
* are no longer required to be DMA ready
|
||||
*/
|
||||
#ifdef CONFIG_ION_FORCE_DMA_SYNC
|
||||
#define MAKE_ION_ALLOC_DMA_READY 1
|
||||
#else
|
||||
#define MAKE_ION_ALLOC_DMA_READY 0
|
||||
#endif
|
||||
|
||||
/**
|
||||
* struct ion_platform_heap - defines a heap in the given platform
|
||||
|
@ -44,6 +75,23 @@ struct ion_platform_heap {
|
|||
void *priv;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ion_platform_data - array of platform heaps passed from board file
|
||||
* @nr: number of structures in the array
|
||||
* @heaps: array of platform_heap structions
|
||||
*
|
||||
* Provided by the board file in the form of platform data to a platform device.
|
||||
*/
|
||||
struct ion_platform_data {
|
||||
int nr;
|
||||
struct ion_platform_heap *heaps;
|
||||
};
|
||||
|
||||
struct ion_vma_list {
|
||||
struct list_head list;
|
||||
struct vm_area_struct *vma;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ion_buffer - metadata for a particular buffer
|
||||
* @ref: reference count
|
||||
|
@ -59,6 +107,7 @@ struct ion_platform_heap {
|
|||
* @kmap_cnt: number of times the buffer is mapped to the kernel
|
||||
* @vaddr: the kernel mapping if kmap_cnt is not zero
|
||||
* @sg_table: the sg table for the buffer if dmap_cnt is not zero
|
||||
* @vmas: list of vma's mapping this buffer
|
||||
*/
|
||||
struct ion_buffer {
|
||||
union {
|
||||
|
@ -71,11 +120,13 @@ struct ion_buffer {
|
|||
unsigned long private_flags;
|
||||
size_t size;
|
||||
void *priv_virt;
|
||||
/* Protect ion buffer */
|
||||
struct mutex lock;
|
||||
int kmap_cnt;
|
||||
void *vaddr;
|
||||
struct sg_table *sg_table;
|
||||
struct list_head attachments;
|
||||
struct list_head vmas;
|
||||
};
|
||||
|
||||
void ion_buffer_destroy(struct ion_buffer *buffer);
|
||||
|
@ -90,6 +141,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
|
|||
struct ion_device {
|
||||
struct miscdevice dev;
|
||||
struct rb_root buffers;
|
||||
/* buffer_lock used for adding and removing buffers */
|
||||
struct mutex buffer_lock;
|
||||
struct rw_semaphore lock;
|
||||
struct plist_head heaps;
|
||||
|
@ -152,6 +204,7 @@ struct ion_heap_ops {
|
|||
* MUST be unique
|
||||
* @name: used for debugging
|
||||
* @shrinker: a shrinker for the heap
|
||||
* @priv: private heap data
|
||||
* @free_list: free list head if deferred free is used
|
||||
* @free_list_size size of the deferred free list in bytes
|
||||
* @lock: protects the free list
|
||||
|
@ -174,21 +227,46 @@ struct ion_heap {
|
|||
unsigned int id;
|
||||
const char *name;
|
||||
struct shrinker shrinker;
|
||||
void *priv;
|
||||
struct list_head free_list;
|
||||
size_t free_list_size;
|
||||
/* Protect the free list */
|
||||
spinlock_t free_lock;
|
||||
wait_queue_head_t waitqueue;
|
||||
struct task_struct *task;
|
||||
atomic_long_t total_allocated;
|
||||
|
||||
int (*debug_show)(struct ion_heap *heap, struct seq_file *s,
|
||||
void *unused);
|
||||
};
|
||||
|
||||
/**
|
||||
* ion_buffer_cached - this ion buffer is cached
|
||||
* @buffer: buffer
|
||||
*
|
||||
* indicates whether this ion buffer is cached
|
||||
*/
|
||||
bool ion_buffer_cached(struct ion_buffer *buffer);
|
||||
|
||||
/**
|
||||
* ion_device_create - allocates and returns an ion device
|
||||
*
|
||||
* returns a valid device or -PTR_ERR
|
||||
*/
|
||||
struct ion_device *ion_device_create(void);
|
||||
|
||||
/**
|
||||
* ion_device_destroy - free and device and it's resource
|
||||
* @dev: the device
|
||||
*/
|
||||
void ion_device_destroy(struct ion_device *dev);
|
||||
|
||||
/**
|
||||
* ion_device_add_heap - adds a heap to the ion device
|
||||
* @dev: the device
|
||||
* @heap: the heap to add
|
||||
*/
|
||||
void ion_device_add_heap(struct ion_heap *heap);
|
||||
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
|
||||
|
||||
/**
|
||||
* some helpers for common operations on buffers using the sg_table
|
||||
|
@ -201,9 +279,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
|
|||
int ion_heap_buffer_zero(struct ion_buffer *buffer);
|
||||
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
|
||||
|
||||
int ion_alloc(size_t len,
|
||||
unsigned int heap_id_mask,
|
||||
unsigned int flags);
|
||||
int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags);
|
||||
|
||||
/**
|
||||
* ion_heap_init_shrinker
|
||||
|
@ -266,8 +342,7 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
|
|||
* the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
|
||||
* flag.
|
||||
*/
|
||||
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
|
||||
size_t size);
|
||||
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);
|
||||
|
||||
/**
|
||||
* ion_heap_freelist_size - returns the size of the freelist in bytes
|
||||
|
@ -275,6 +350,55 @@ size_t ion_heap_freelist_shrink(struct ion_heap *heap,
|
|||
*/
|
||||
size_t ion_heap_freelist_size(struct ion_heap *heap);
|
||||
|
||||
/**
|
||||
* functions for creating and destroying the built in ion heaps.
|
||||
* architectures can add their own custom architecture specific
|
||||
* heaps as appropriate.
|
||||
*/
|
||||
|
||||
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data);
|
||||
void ion_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused);
|
||||
void ion_system_heap_destroy(struct ion_heap *heap);
|
||||
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap);
|
||||
void ion_system_contig_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data);
|
||||
void ion_carveout_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data);
|
||||
void ion_chunk_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
#ifdef CONFIG_CMA
|
||||
struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data);
|
||||
void ion_secure_cma_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data);
|
||||
void ion_cma_heap_destroy(struct ion_heap *heap);
|
||||
#else
|
||||
static inline struct ion_heap
|
||||
*ion_secure_cma_heap_create(struct ion_platform_heap *h)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
|
||||
|
||||
static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
|
||||
#endif
|
||||
|
||||
struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
|
||||
void ion_system_secure_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
|
||||
void ion_cma_secure_heap_destroy(struct ion_heap *heap);
|
||||
|
||||
/**
|
||||
* functions for creating and destroying a heap pool -- allows you
|
||||
* to keep a pool of pre allocated memory to use from your heap. Keeping
|
||||
|
@ -294,6 +418,7 @@ size_t ion_heap_freelist_size(struct ion_heap *heap);
|
|||
* @gfp_mask: gfp_mask to use from alloc
|
||||
* @order: order of pages in the pool
|
||||
* @list: plist node for list of pools
|
||||
* @cached: it's cached pool or not
|
||||
*
|
||||
* Allows you to keep a pool of pre allocated pages to use from your heap.
|
||||
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
|
||||
|
@ -303,19 +428,29 @@ size_t ion_heap_freelist_size(struct ion_heap *heap);
|
|||
struct ion_page_pool {
|
||||
int high_count;
|
||||
int low_count;
|
||||
bool cached;
|
||||
struct list_head high_items;
|
||||
struct list_head low_items;
|
||||
/* Protect the pool */
|
||||
struct mutex mutex;
|
||||
gfp_t gfp_mask;
|
||||
unsigned int order;
|
||||
struct plist_node list;
|
||||
};
|
||||
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
|
||||
bool cached);
|
||||
void ion_page_pool_destroy(struct ion_page_pool *pool);
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *a, bool *from_pool);
|
||||
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
|
||||
|
||||
struct ion_heap *get_ion_heap(int heap_id);
|
||||
struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *a);
|
||||
void ion_page_pool_free_immediate(struct ion_page_pool *pool,
|
||||
struct page *page);
|
||||
int ion_page_pool_total(struct ion_page_pool *pool, bool high);
|
||||
size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);
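/*
 * Minimal, illustrative sketch of the page pool API above (not part of this
 * patch). Error handling and the heap bookkeeping of a real caller are
 * omitted; order 0 is used to keep the example simple.
 *
 *	bool from_pool = true;
 *	struct page *page;
 *	struct ion_page_pool *pool;
 *
 *	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0, false);
 *	page = ion_page_pool_alloc(pool, &from_pool);
 *	// from_pool is cleared if the pool was empty and the page came
 *	// straight from the page allocator instead
 *	ion_page_pool_free(pool, page);
 *	ion_page_pool_destroy(pool);
 */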
|
||||
|
||||
/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
|
||||
* @pool: the pool
|
||||
* @gfp_mask: the memory type to reclaim
|
||||
|
@ -326,6 +461,20 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
|
|||
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
||||
int nr_to_scan);
|
||||
|
||||
/**
|
||||
* ion_pages_sync_for_device - cache flush pages for use with the specified
|
||||
* device
|
||||
* @dev: the device the pages will be used with
|
||||
* @page: the first page to be flushed
|
||||
* @size: size in bytes of region to be flushed
|
||||
* @dir: direction of dma transfer
|
||||
*/
|
||||
void ion_pages_sync_for_device(struct device *dev, struct page *page,
|
||||
size_t size, enum dma_data_direction dir);
|
||||
|
||||
int ion_walk_heaps(int heap_id, enum ion_heap_type type, void *data,
|
||||
int (*f)(struct ion_heap *heap, void *data));
|
||||
|
||||
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
|
||||
|
||||
int ion_query_heaps(struct ion_heap_query *query);
|
||||
|
|
|
@ -24,7 +24,7 @@ struct ion_carveout_heap {
|
|||
};
|
||||
|
||||
static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
|
||||
unsigned long size)
|
||||
unsigned long size)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
|
@ -55,6 +55,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
|
|||
struct sg_table *table;
|
||||
phys_addr_t paddr;
|
||||
int ret;
|
||||
struct device *dev = heap->priv;
|
||||
|
||||
table = kmalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
|
@ -72,6 +73,10 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
|
|||
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
|
||||
buffer->sg_table = table;
|
||||
|
||||
if (ion_buffer_cached(buffer))
|
||||
ion_pages_sync_for_device(dev, sg_page(table->sgl),
|
||||
buffer->size, DMA_FROM_DEVICE);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_table:
|
||||
|
@ -86,10 +91,15 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
|
|||
struct ion_heap *heap = buffer->heap;
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
struct page *page = sg_page(table->sgl);
|
||||
phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
|
||||
phys_addr_t paddr = page_to_phys(page);
|
||||
struct device *dev = (struct device *)heap->priv;
|
||||
|
||||
ion_heap_buffer_zero(buffer);
|
||||
|
||||
if (ion_buffer_cached(buffer))
|
||||
ion_pages_sync_for_device(dev, page, buffer->size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
ion_carveout_free(heap, paddr, buffer->size);
|
||||
sg_free_table(table);
|
||||
kfree(table);
|
||||
|
@ -110,10 +120,13 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
|
|||
|
||||
struct page *page;
|
||||
size_t size;
|
||||
struct device *dev = (struct device *)heap_data->priv;
|
||||
|
||||
page = pfn_to_page(PFN_DOWN(heap_data->base));
|
||||
size = heap_data->size;
|
||||
|
||||
ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
|
||||
|
||||
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
@ -136,3 +149,13 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
|
|||
|
||||
return &carveout_heap->heap;
|
||||
}
|
||||
|
||||
void ion_carveout_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_carveout_heap *carveout_heap =
|
||||
container_of(heap, struct ion_carveout_heap, heap);
|
||||
|
||||
gen_pool_destroy(carveout_heap->pool);
|
||||
kfree(carveout_heap);
|
||||
carveout_heap = NULL;
|
||||
}
|
||||
|
|
|
@ -91,6 +91,10 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
|
|||
|
||||
ion_heap_buffer_zero(buffer);
|
||||
|
||||
if (ion_buffer_cached(buffer))
|
||||
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
|
||||
sg->length);
|
||||
|
@ -118,6 +122,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
|
|||
page = pfn_to_page(PFN_DOWN(heap_data->base));
|
||||
size = heap_data->size;
|
||||
|
||||
ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
|
||||
|
||||
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
@ -151,3 +157,12 @@ error_gen_pool_create:
|
|||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
void ion_chunk_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_chunk_heap *chunk_heap =
|
||||
container_of(heap, struct ion_chunk_heap, heap);
|
||||
|
||||
gen_pool_destroy(chunk_heap->pool);
|
||||
kfree(chunk_heap);
|
||||
chunk_heap = NULL;
|
||||
}
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* drivers/staging/android/ion/ion_cma_heap.c
|
||||
*
|
||||
* Copyright (C) Linaro 2012
|
||||
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
|
||||
*
|
||||
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
|
@ -12,9 +12,11 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/cma.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include "ion.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
struct ion_cma_heap {
|
||||
struct ion_heap heap;
|
||||
|
@ -35,6 +37,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
|||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
unsigned long align = get_order(size);
|
||||
int ret;
|
||||
struct device *dev = heap->priv;
|
||||
|
||||
if (align > CONFIG_CMA_ALIGNMENT)
|
||||
align = CONFIG_CMA_ALIGNMENT;
|
||||
|
@ -43,22 +46,30 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
|||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
if (PageHighMem(pages)) {
|
||||
unsigned long nr_clear_pages = nr_pages;
|
||||
struct page *page = pages;
|
||||
if (!(flags & ION_FLAG_SECURE)) {
|
||||
if (PageHighMem(pages)) {
|
||||
unsigned long nr_clear_pages = nr_pages;
|
||||
struct page *page = pages;
|
||||
|
||||
while (nr_clear_pages > 0) {
|
||||
void *vaddr = kmap_atomic(page);
|
||||
while (nr_clear_pages > 0) {
|
||||
void *vaddr = kmap_atomic(page);
|
||||
|
||||
memset(vaddr, 0, PAGE_SIZE);
|
||||
kunmap_atomic(vaddr);
|
||||
page++;
|
||||
nr_clear_pages--;
|
||||
memset(vaddr, 0, PAGE_SIZE);
|
||||
kunmap_atomic(vaddr);
|
||||
page++;
|
||||
nr_clear_pages--;
|
||||
}
|
||||
} else {
|
||||
memset(page_address(pages), 0, size);
|
||||
}
|
||||
} else {
|
||||
memset(page_address(pages), 0, size);
|
||||
}
|
||||
|
||||
if (MAKE_ION_ALLOC_DMA_READY ||
|
||||
(flags & ION_FLAG_SECURE) ||
|
||||
(!ion_buffer_cached(buffer)))
|
||||
ion_pages_sync_for_device(dev, pages, size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
table = kmalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
goto err;
|
||||
|
@ -101,9 +112,13 @@ static struct ion_heap_ops ion_cma_ops = {
|
|||
.unmap_kernel = ion_heap_unmap_kernel,
|
||||
};
|
||||
|
||||
static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
|
||||
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_cma_heap *cma_heap;
|
||||
struct device *dev = (struct device *)data->priv;
|
||||
|
||||
if (!dev->cma_area)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
|
||||
|
||||
|
@ -115,28 +130,85 @@ static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
|
|||
* get device from private heaps data, later it will be
|
||||
* used to make the link with reserved CMA memory
|
||||
*/
|
||||
cma_heap->cma = cma;
|
||||
cma_heap->cma = dev->cma_area;
|
||||
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
|
||||
return &cma_heap->heap;
|
||||
}
|
||||
|
||||
static int __ion_add_cma_heaps(struct cma *cma, void *data)
|
||||
void ion_cma_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
|
||||
|
||||
heap = __ion_cma_heap_create(cma);
|
||||
if (IS_ERR(heap))
|
||||
return PTR_ERR(heap);
|
||||
|
||||
heap->name = cma_get_name(cma);
|
||||
|
||||
ion_device_add_heap(heap);
|
||||
return 0;
|
||||
kfree(cma_heap);
|
||||
}
|
||||
|
||||
static int ion_add_cma_heaps(void)
|
||||
static void ion_secure_cma_free(struct ion_buffer *buffer)
|
||||
{
|
||||
cma_for_each_area(__ion_add_cma_heaps, NULL);
|
||||
return 0;
|
||||
if (ion_hyp_unassign_sg_from_flags(buffer->sg_table, buffer->flags,
|
||||
false))
|
||||
return;
|
||||
|
||||
ion_cma_free(buffer);
|
||||
}
|
||||
|
||||
static int ion_secure_cma_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer, unsigned long len,
|
||||
unsigned long flags)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = ion_cma_allocate(heap, buffer, len, flags);
|
||||
if (ret) {
|
||||
dev_err(heap->priv, "Unable to allocate cma buffer\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ion_hyp_assign_sg_from_flags(buffer->sg_table, flags, false);
|
||||
if (ret)
|
||||
goto out_free_buf;
|
||||
|
||||
return ret;
|
||||
|
||||
out_free_buf:
|
||||
ion_secure_cma_free(buffer);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct ion_heap_ops ion_secure_cma_ops = {
|
||||
.allocate = ion_secure_cma_allocate,
|
||||
.free = ion_secure_cma_free,
|
||||
.map_user = ion_heap_map_user,
|
||||
.map_kernel = ion_heap_map_kernel,
|
||||
.unmap_kernel = ion_heap_unmap_kernel,
|
||||
};
|
||||
|
||||
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_cma_heap *cma_heap;
|
||||
struct device *dev = (struct device *)data->priv;
|
||||
|
||||
if (!dev->cma_area)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
|
||||
|
||||
if (!cma_heap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cma_heap->heap.ops = &ion_secure_cma_ops;
|
||||
/*
|
||||
* get device from private heaps data, later it will be
|
||||
* used to make the link with reserved CMA memory
|
||||
*/
|
||||
cma_heap->cma = dev->cma_area;
|
||||
cma_heap->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA;
|
||||
return &cma_heap->heap;
|
||||
}
|
||||
|
||||
void ion_cma_secure_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
|
||||
|
||||
kfree(cma_heap);
|
||||
}
|
||||
device_initcall(ion_add_cma_heaps);
|
||||
|
|
825
drivers/staging/android/ion/ion_cma_secure_heap.c
Normal file
|
@ -0,0 +1,825 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) Linaro 2012
|
||||
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
|
||||
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/ion.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/msm_ion.h>
|
||||
#include <trace/events/kmem.h>
|
||||
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
/* for ion_heap_ops structure */
|
||||
#include "ion.h"
|
||||
|
||||
#define ION_CMA_ALLOCATE_FAILED NULL
|
||||
|
||||
struct ion_secure_cma_non_contig_info {
|
||||
dma_addr_t phys;
|
||||
int len;
|
||||
struct list_head entry;
|
||||
};
|
||||
|
||||
struct ion_secure_cma_buffer_info {
|
||||
dma_addr_t phys;
|
||||
struct sg_table *table;
|
||||
bool is_cached;
|
||||
int len;
|
||||
struct list_head non_contig_list;
|
||||
unsigned long ncelems;
|
||||
};
|
||||
|
||||
struct ion_cma_alloc_chunk {
|
||||
void *cpu_addr;
|
||||
struct list_head entry;
|
||||
dma_addr_t handle;
|
||||
unsigned long chunk_size;
|
||||
atomic_t cnt;
|
||||
};
|
||||
|
||||
struct ion_cma_secure_heap {
|
||||
struct device *dev;
|
||||
/*
|
||||
 * Protects against races between threads allocating memory and adding to
 * the pool at the same time (e.g. thread 1 adds to the pool, thread 2
 * allocates thread 1's memory before thread 1 knows it needs to
 * allocate more).
 * Admittedly this is fairly coarse grained, but contention on this lock
 * is unlikely right now. This can be revisited if that ever changes.
|
||||
*/
|
||||
struct mutex alloc_lock;
|
||||
/*
|
||||
* protects the list of memory chunks in this pool
|
||||
*/
|
||||
struct mutex chunk_lock;
|
||||
struct ion_heap heap;
|
||||
/*
|
||||
* Bitmap for allocation. This contains the aggregate of all chunks.
|
||||
*/
|
||||
unsigned long *bitmap;
|
||||
/*
|
||||
* List of all allocated chunks
|
||||
*
|
||||
* This is where things get 'clever'. Individual allocations from
|
||||
* dma_alloc_coherent must be allocated and freed in one chunk.
|
||||
* We don't just want to limit the allocations to those confined
|
||||
* within a single chunk (if clients allocate n small chunks we would
|
||||
* never be able to use the combined size). The bitmap allocator is
|
||||
* used to find the contiguous region and the parts of the chunks are
|
||||
* marked off as used. The chunks won't be freed in the shrinker until
|
||||
* the usage is actually zero.
|
||||
*/
|
||||
struct list_head chunks;
|
||||
int npages;
|
||||
phys_addr_t base;
|
||||
struct work_struct work;
|
||||
unsigned long last_alloc;
|
||||
struct shrinker shrinker;
|
||||
atomic_t total_allocated;
|
||||
atomic_t total_pool_size;
|
||||
atomic_t total_leaked;
|
||||
unsigned long heap_size;
|
||||
unsigned long default_prefetch_size;
|
||||
};
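/*
 * Illustrative summary of how the fields above interact (drawn from the
 * functions below, not an addition to the design): ion_secure_cma_add_to_pool()
 * grabs a chunk with dma_alloc_attrs() and clears its bits in @bitmap;
 * ion_secure_cma_alloc_from_pool() finds a zero run with
 * bitmap_find_next_zero_area(), sets those bits and charges each overlapped
 * chunk's @cnt via intersect(); frees clear the bits and uncharge the chunks;
 * the shrinker only releases chunks whose @cnt has dropped back to zero.
 */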
|
||||
|
||||
static void ion_secure_pool_pages(struct work_struct *work);
|
||||
|
||||
static int ion_heap_allow_secure_allocation(enum ion_heap_type type)
|
||||
{
|
||||
return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create scatter-list for the already allocated DMA buffer.
|
||||
* This function could be replace by dma_common_get_sgtable
|
||||
* as soon as it will avalaible.
|
||||
*/
|
||||
static int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
dma_addr_t handle, size_t size)
|
||||
{
|
||||
struct page *page = pfn_to_page(PFN_DOWN(handle));
|
||||
int ret;
|
||||
|
||||
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
|
||||
sg_dma_address(sgt->sgl) = handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
ion_secure_cma_add_to_pool(struct ion_cma_secure_heap *sheap,
|
||||
unsigned long len,
|
||||
bool prefetch)
|
||||
{
|
||||
void *cpu_addr;
|
||||
dma_addr_t handle;
|
||||
unsigned long attrs = 0;
|
||||
int ret = 0;
|
||||
struct ion_cma_alloc_chunk *chunk;
|
||||
atomic_t *temp = &sheap->total_pool_size;
|
||||
|
||||
trace_ion_secure_cma_add_to_pool_start(len,
|
||||
atomic_read(temp),
|
||||
prefetch);
|
||||
mutex_lock(&sheap->chunk_lock);
|
||||
|
||||
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
|
||||
if (!chunk) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_SKIP_ZEROING;
|
||||
|
||||
cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
|
||||
attrs);
|
||||
|
||||
if (!cpu_addr) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
chunk->cpu_addr = cpu_addr;
|
||||
chunk->handle = handle;
|
||||
chunk->chunk_size = len;
|
||||
atomic_set(&chunk->cnt, 0);
|
||||
list_add(&chunk->entry, &sheap->chunks);
|
||||
atomic_add(len, &sheap->total_pool_size);
|
||||
/* clear the bitmap to indicate this region can be allocated from */
|
||||
bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
|
||||
len >> PAGE_SHIFT);
|
||||
goto out;
|
||||
|
||||
out_free:
|
||||
kfree(chunk);
|
||||
out:
|
||||
mutex_unlock(&sheap->chunk_lock);
|
||||
|
||||
trace_ion_secure_cma_add_to_pool_end(len,
|
||||
atomic_read(temp),
|
||||
prefetch);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ion_secure_pool_pages(struct work_struct *work)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap = container_of(work,
|
||||
struct ion_cma_secure_heap, work);
|
||||
|
||||
ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* @s1: start of the first region
|
||||
* @l1: length of the first region
|
||||
* @s2: start of the second region
|
||||
* @l2: length of the second region
|
||||
*
|
||||
* Returns the total number of bytes that intersect.
|
||||
*
|
||||
* s1 is the region we are trying to clear so s2 may be subsumed by s1 but the
|
||||
* maximum size to clear should only ever be l1
|
||||
*
|
||||
*/
|
||||
static unsigned int intersect(unsigned long s1, unsigned long l1,
|
||||
unsigned long s2, unsigned long l2)
|
||||
{
|
||||
unsigned long base1 = s1;
|
||||
unsigned long end1 = s1 + l1;
|
||||
unsigned long base2 = s2;
|
||||
unsigned long end2 = s2 + l2;
|
||||
|
||||
/* Case 0: The regions don't overlap at all */
|
||||
if (!(base1 < end2 && base2 < end1))
|
||||
return 0;
|
||||
|
||||
/* Case 1: region 2 is subsumed by region 1 */
|
||||
if (base1 <= base2 && end2 <= end1)
|
||||
return l2;
|
||||
|
||||
/* case 2: region 1 is subsumed by region 2 */
|
||||
if (base2 <= base1 && end1 <= end2)
|
||||
return l1;
|
||||
|
||||
/* case 3: region1 overlaps region2 on the bottom */
|
||||
if (base2 < end1 && base2 > base1)
|
||||
return end1 - base2;
|
||||
|
||||
/* case 4: region 2 overlaps region1 on the bottom */
|
||||
if (base1 < end2 && base1 > base2)
|
||||
return end2 - base1;
|
||||
|
||||
pr_err("Bad math! Did not detect chunks correctly! %lx %lx %lx %lx\n",
|
||||
s1, l1, s2, l2);
|
||||
WARN_ON(1);
|
||||
/* return max intersection value, so that it will fail later */
|
||||
return (unsigned int)(~0);
|
||||
}
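/*
 * A few concrete values, purely for illustration of the cases above
 * (addresses are arbitrary and in hex for readability):
 *
 *	intersect(0x1000, 0x4000, 0x2000, 0x1000) == 0x1000  // region 2 inside region 1
 *	intersect(0x1000, 0x2000, 0x0000, 0x8000) == 0x2000  // region 1 inside region 2
 *	intersect(0x1000, 0x2000, 0x2000, 0x2000) == 0x1000  // partial overlap
 *	intersect(0x4000, 0x2000, 0x1000, 0x4000) == 0x1000  // partial overlap
 *	intersect(0x1000, 0x1000, 0x8000, 0x1000) == 0        // disjoint
 */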
|
||||
|
||||
int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
|
||||
{
|
||||
unsigned long len = (unsigned long)data;
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(heap, struct ion_cma_secure_heap, heap);
|
||||
unsigned long diff;
|
||||
|
||||
if ((int)heap->type != ION_HEAP_TYPE_SECURE_DMA)
|
||||
return -EINVAL;
|
||||
|
||||
if (len == 0)
|
||||
len = sheap->default_prefetch_size;
|
||||
|
||||
/*
|
||||
* Only prefetch as much space as there is left in the pool so
|
||||
* check against the current free size of the heap.
|
||||
* This is slightly racy if someone else is allocating at the same
|
||||
* time. CMA has a restricted size for the heap so worst case
|
||||
* the prefetch doesn't work because the allocation fails.
|
||||
*/
|
||||
diff = sheap->heap_size - atomic_read(&sheap->total_pool_size);
|
||||
|
||||
if (len > diff)
|
||||
len = diff;
|
||||
|
||||
sheap->last_alloc = len;
|
||||
trace_ion_prefetching(sheap->last_alloc);
|
||||
schedule_work(&sheap->work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bad_math_dump(unsigned long len, int total_overlap,
|
||||
struct ion_cma_secure_heap *sheap,
|
||||
bool alloc, dma_addr_t paddr)
|
||||
{
|
||||
struct list_head *entry;
|
||||
|
||||
pr_err("Bad math! expected total was %lx actual was %x\n",
|
||||
len, total_overlap);
|
||||
pr_err("attempted %s address was %pa len %lx\n",
|
||||
alloc ? "allocation" : "free", &paddr, len);
|
||||
pr_err("chunks:\n");
|
||||
list_for_each(entry, &sheap->chunks) {
|
||||
struct ion_cma_alloc_chunk *chunk =
|
||||
container_of(entry,
|
||||
struct ion_cma_alloc_chunk, entry);
|
||||
pr_info("--- pa %pa len %lx\n",
|
||||
&chunk->handle, chunk->chunk_size);
|
||||
}
|
||||
WARN(1, "mismatch in the sizes of secure cma chunks\n");
|
||||
}
|
||||
|
||||
static int
|
||||
ion_secure_cma_alloc_from_pool(struct ion_cma_secure_heap *sheap,
|
||||
dma_addr_t *phys,
|
||||
unsigned long len)
|
||||
{
|
||||
dma_addr_t paddr;
|
||||
unsigned long page_no;
|
||||
int ret = 0;
|
||||
int total_overlap = 0;
|
||||
struct list_head *entry;
|
||||
|
||||
mutex_lock(&sheap->chunk_lock);
|
||||
|
||||
page_no = bitmap_find_next_zero_area(sheap->bitmap,
|
||||
sheap->npages, 0,
|
||||
len >> PAGE_SHIFT, 0);
|
||||
if (page_no >= sheap->npages) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT);
|
||||
paddr = sheap->base + (page_no << PAGE_SHIFT);
|
||||
|
||||
list_for_each(entry, &sheap->chunks) {
|
||||
struct ion_cma_alloc_chunk *chunk = container_of(entry,
|
||||
struct ion_cma_alloc_chunk, entry);
|
||||
int overlap = intersect(chunk->handle,
|
||||
chunk->chunk_size, paddr, len);
|
||||
|
||||
atomic_add(overlap, &chunk->cnt);
|
||||
total_overlap += overlap;
|
||||
}
|
||||
|
||||
if (total_overlap != len) {
|
||||
bad_math_dump(len, total_overlap, sheap, 1, paddr);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
*phys = paddr;
|
||||
out:
|
||||
mutex_unlock(&sheap->chunk_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
|
||||
struct ion_cma_alloc_chunk *chunk)
|
||||
{
|
||||
unsigned long attrs = 0;
|
||||
|
||||
attrs = DMA_ATTR_NO_KERNEL_MAPPING;
|
||||
/* This region is 'allocated' and not available to allocate from */
|
||||
bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT,
|
||||
chunk->chunk_size >> PAGE_SHIFT);
|
||||
dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr,
|
||||
chunk->handle, attrs);
|
||||
atomic_sub(chunk->chunk_size, &sheap->total_pool_size);
|
||||
list_del(&chunk->entry);
|
||||
kfree(chunk);
|
||||
}
|
||||
|
||||
static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
|
||||
int max_nr)
|
||||
{
|
||||
struct list_head *entry, *_n;
|
||||
unsigned long drained_size = 0, skipped_size = 0;
|
||||
|
||||
trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);
|
||||
|
||||
list_for_each_safe(entry, _n, &sheap->chunks) {
|
||||
struct ion_cma_alloc_chunk *chunk = container_of(entry,
|
||||
struct ion_cma_alloc_chunk, entry);
|
||||
|
||||
if (max_nr < 0)
|
||||
break;
|
||||
|
||||
if (atomic_read(&chunk->cnt) == 0) {
|
||||
max_nr -= chunk->chunk_size;
|
||||
drained_size += chunk->chunk_size;
|
||||
ion_secure_cma_free_chunk(sheap, chunk);
|
||||
} else {
|
||||
skipped_size += chunk->chunk_size;
|
||||
}
|
||||
}
|
||||
|
||||
trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
|
||||
}
|
||||
|
||||
int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(heap, struct ion_cma_secure_heap, heap);
|
||||
|
||||
mutex_lock(&sheap->chunk_lock);
|
||||
__ion_secure_cma_shrink_pool(sheap, INT_MAX);
|
||||
mutex_unlock(&sheap->chunk_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap = container_of(shrinker,
|
||||
struct ion_cma_secure_heap, shrinker);
|
||||
int nr_to_scan = sc->nr_to_scan;
|
||||
|
||||
/*
|
||||
* Allocation path may invoke the shrinker. Proceeding any further
|
||||
* would cause a deadlock in several places so don't shrink if that
|
||||
* happens.
|
||||
*/
|
||||
if (!mutex_trylock(&sheap->chunk_lock))
|
||||
return -EAGAIN;
|
||||
|
||||
__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
|
||||
|
||||
mutex_unlock(&sheap->chunk_lock);
|
||||
|
||||
return atomic_read(&sheap->total_pool_size);
|
||||
}
|
||||
|
||||
static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap = container_of(shrinker,
|
||||
struct ion_cma_secure_heap, shrinker);
|
||||
return atomic_read(&sheap->total_pool_size);
|
||||
}
|
||||
|
||||
static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap,
|
||||
dma_addr_t handle,
|
||||
unsigned long len)
|
||||
{
|
||||
struct list_head *entry, *_n;
|
||||
int total_overlap = 0;
|
||||
|
||||
mutex_lock(&sheap->chunk_lock);
|
||||
bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
|
||||
len >> PAGE_SHIFT);
|
||||
|
||||
list_for_each_safe(entry, _n, &sheap->chunks) {
|
||||
struct ion_cma_alloc_chunk *chunk = container_of(entry,
|
||||
struct ion_cma_alloc_chunk, entry);
|
||||
int overlap = intersect(chunk->handle,
|
||||
chunk->chunk_size, handle, len);
|
||||
|
||||
/*
|
||||
* Don't actually free this from the pool list yet, let either
|
||||
* an explicit drain call or the shrinkers take care of the
|
||||
* pool.
|
||||
*/
|
||||
atomic_sub_return(overlap, &chunk->cnt);
|
||||
if (atomic_read(&chunk->cnt) < 0) {
|
||||
WARN(1, "Invalid chunk size of %d\n",
|
||||
atomic_read(&chunk->cnt));
|
||||
goto out;
|
||||
}
|
||||
|
||||
total_overlap += overlap;
|
||||
}
|
||||
|
||||
if (atomic_read(&sheap->total_pool_size) < 0) {
|
||||
WARN(1, "total pool size of %d is unexpected\n",
|
||||
atomic_read(&sheap->total_pool_size));
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (total_overlap != len)
|
||||
bad_math_dump(len, total_overlap, sheap, 0, handle);
|
||||
out:
|
||||
mutex_unlock(&sheap->chunk_lock);
|
||||
}
|
||||
|
||||
/* ION CMA heap operations functions */
|
||||
static struct ion_secure_cma_buffer_info *
|
||||
__ion_secure_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
unsigned long len,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(heap, struct ion_cma_secure_heap, heap);
|
||||
struct ion_secure_cma_buffer_info *info;
|
||||
int ret;
|
||||
|
||||
dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
|
||||
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return ION_CMA_ALLOCATE_FAILED;
|
||||
|
||||
mutex_lock(&sheap->alloc_lock);
|
||||
ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
|
||||
|
||||
if (ret) {
|
||||
retry:
|
||||
ret = ion_secure_cma_add_to_pool(sheap, len, false);
|
||||
if (ret) {
|
||||
mutex_unlock(&sheap->alloc_lock);
|
||||
dev_err(sheap->dev, "Fail to allocate buffer\n");
|
||||
goto err;
|
||||
}
|
||||
ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
|
||||
if (ret) {
|
||||
/*
|
||||
* Lost the race with the shrinker, try again
|
||||
*/
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&sheap->alloc_lock);
|
||||
|
||||
atomic_add(len, &sheap->total_allocated);
|
||||
info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
|
||||
if (!info->table) {
|
||||
dev_err(sheap->dev, "Fail to allocate sg table\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
info->len = len;
|
||||
ion_secure_cma_get_sgtable(sheap->dev,
|
||||
info->table, info->phys, len);
|
||||
|
||||
/* keep this for memory release */
|
||||
buffer->priv_virt = info;
|
||||
dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
|
||||
return info;
|
||||
|
||||
err:
|
||||
kfree(info);
|
||||
return ION_CMA_ALLOCATE_FAILED;
|
||||
}
|
||||
|
||||
static void __ion_secure_cma_free_non_contig(struct ion_cma_secure_heap *sheap,
|
||||
struct ion_secure_cma_buffer_info
|
||||
*info)
|
||||
{
|
||||
struct ion_secure_cma_non_contig_info *nc_info, *temp;
|
||||
|
||||
list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, entry) {
|
||||
ion_secure_cma_free_from_pool(sheap, nc_info->phys,
|
||||
nc_info->len);
|
||||
list_del(&nc_info->entry);
|
||||
kfree(nc_info);
|
||||
}
|
||||
}
|
||||
|
||||
static void __ion_secure_cma_free(struct ion_cma_secure_heap *sheap,
|
||||
struct ion_secure_cma_buffer_info *info,
|
||||
bool release_memory)
|
||||
{
|
||||
if (release_memory) {
|
||||
if (info->ncelems)
|
||||
__ion_secure_cma_free_non_contig(sheap, info);
|
||||
else
|
||||
ion_secure_cma_free_from_pool(sheap, info->phys,
|
||||
info->len);
|
||||
}
|
||||
sg_free_table(info->table);
|
||||
kfree(info->table);
|
||||
kfree(info);
|
||||
}
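/*
 * The helper below satisfies a request with several smaller pool allocations
 * when one contiguous region is unavailable: every piece must stay a multiple
 * of SZ_1M, the piece size is halved (and rounded down to 1MB) whenever the
 * pool cannot be grown, and the pieces are stitched together into a single
 * sg_table for the buffer.
 */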
|
||||
|
||||
static struct ion_secure_cma_buffer_info *
|
||||
__ion_secure_cma_allocate_non_contig(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long len,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(heap, struct ion_cma_secure_heap, heap);
|
||||
struct ion_secure_cma_buffer_info *info;
|
||||
int ret;
|
||||
unsigned long alloc_size = len;
|
||||
struct ion_secure_cma_non_contig_info *nc_info, *temp;
|
||||
unsigned long ncelems = 0;
|
||||
struct scatterlist *sg;
|
||||
unsigned long total_allocated = 0;
|
||||
|
||||
dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
|
||||
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return ION_CMA_ALLOCATE_FAILED;
|
||||
|
||||
INIT_LIST_HEAD(&info->non_contig_list);
|
||||
info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
|
||||
if (!info->table) {
|
||||
dev_err(sheap->dev, "Fail to allocate sg table\n");
|
||||
goto err;
|
||||
}
|
||||
mutex_lock(&sheap->alloc_lock);
|
||||
while (total_allocated < len) {
|
||||
if (alloc_size < SZ_1M) {
|
||||
pr_err("Cannot allocate less than 1MB\n");
|
||||
goto err2;
|
||||
}
|
||||
nc_info = kzalloc(sizeof(*nc_info), GFP_KERNEL);
|
||||
if (!nc_info)
|
||||
goto err2;
|
||||
|
||||
ret = ion_secure_cma_alloc_from_pool(sheap, &nc_info->phys,
|
||||
alloc_size);
|
||||
if (ret) {
|
||||
retry:
|
||||
ret = ion_secure_cma_add_to_pool(sheap, alloc_size,
|
||||
false);
|
||||
if (ret) {
|
||||
alloc_size = alloc_size / 2;
|
||||
if (!IS_ALIGNED(alloc_size, SZ_1M))
|
||||
alloc_size = round_down(alloc_size,
|
||||
SZ_1M);
|
||||
kfree(nc_info);
|
||||
continue;
|
||||
}
|
||||
ret = ion_secure_cma_alloc_from_pool(sheap,
|
||||
&nc_info->phys,
|
||||
alloc_size);
|
||||
if (ret) {
|
||||
/*
|
||||
* Lost the race with the shrinker, try again
|
||||
*/
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
nc_info->len = alloc_size;
|
||||
list_add_tail(&nc_info->entry, &info->non_contig_list);
|
||||
ncelems++;
|
||||
total_allocated += alloc_size;
|
||||
alloc_size = min(alloc_size, len - total_allocated);
|
||||
}
|
||||
mutex_unlock(&sheap->alloc_lock);
|
||||
atomic_add(total_allocated, &sheap->total_allocated);
|
||||
|
||||
nc_info = list_first_entry_or_null(&info->non_contig_list,
|
||||
struct
|
||||
ion_secure_cma_non_contig_info,
|
||||
entry);
|
||||
if (!nc_info) {
|
||||
pr_err("%s: Unable to find first entry of non contig list\n",
|
||||
__func__);
|
||||
goto err1;
|
||||
}
|
||||
info->phys = nc_info->phys;
|
||||
info->len = total_allocated;
|
||||
info->ncelems = ncelems;
|
||||
|
||||
ret = sg_alloc_table(info->table, ncelems, GFP_KERNEL);
|
||||
if (unlikely(ret))
|
||||
goto err1;
|
||||
|
||||
sg = info->table->sgl;
|
||||
list_for_each_entry(nc_info, &info->non_contig_list, entry) {
|
||||
sg_set_page(sg, phys_to_page(nc_info->phys), nc_info->len, 0);
|
||||
sg_dma_address(sg) = nc_info->phys;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
buffer->priv_virt = info;
|
||||
dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
|
||||
return info;
|
||||
|
||||
err2:
|
||||
mutex_unlock(&sheap->alloc_lock);
|
||||
err1:
|
||||
list_for_each_entry_safe(nc_info, temp, &info->non_contig_list,
|
||||
entry) {
|
||||
list_del(&nc_info->entry);
|
||||
kfree(nc_info);
|
||||
}
|
||||
kfree(info->table);
|
||||
err:
|
||||
kfree(info);
|
||||
return ION_CMA_ALLOCATE_FAILED;
|
||||
}
|
||||
|
||||
static int ion_secure_cma_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long len,
|
||||
unsigned long flags)
|
||||
{
|
||||
unsigned long secure_allocation = flags & ION_FLAG_SECURE;
|
||||
struct ion_secure_cma_buffer_info *buf = NULL;
|
||||
unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG;
|
||||
|
||||
if (!secure_allocation &&
|
||||
!ion_heap_allow_secure_allocation(heap->type)) {
|
||||
pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
|
||||
__func__, heap->name, flags);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (ION_IS_CACHED(flags)) {
|
||||
pr_err("%s: cannot allocate cached memory from secure heap %s\n",
|
||||
__func__, heap->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (!IS_ALIGNED(len, SZ_1M)) {
|
||||
pr_err("%s: length of allocation from %s must be a multiple of 1MB\n",
|
||||
__func__, heap->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
trace_ion_secure_cma_allocate_start(heap->name, len, flags);
|
||||
if (!allow_non_contig)
|
||||
buf = __ion_secure_cma_allocate(heap, buffer, len,
|
||||
flags);
|
||||
else
|
||||
buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
|
||||
flags);
|
||||
trace_ion_secure_cma_allocate_end(heap->name, len, flags);
|
||||
if (buf) {
|
||||
int ret;
|
||||
|
||||
if (!msm_secure_v2_is_supported()) {
|
||||
pr_err("%s: securing buffers from clients is not supported on this platform\n",
|
||||
__func__);
|
||||
ret = 1;
|
||||
} else {
|
||||
trace_ion_cp_secure_buffer_start(heap->name, len,
|
||||
flags);
|
||||
ret = msm_secure_table(buf->table);
|
||||
trace_ion_cp_secure_buffer_end(heap->name, len,
|
||||
flags);
|
||||
}
|
||||
if (ret) {
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(buffer->heap,
|
||||
struct ion_cma_secure_heap, heap);
|
||||
|
||||
pr_err("%s: failed to secure buffer\n", __func__);
|
||||
__ion_secure_cma_free(sheap, buf, true);
|
||||
}
|
||||
return ret;
|
||||
} else {
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
static void ion_secure_cma_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(buffer->heap, struct ion_cma_secure_heap, heap);
|
||||
struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
|
||||
int ret = 0;
|
||||
|
||||
dev_dbg(sheap->dev, "Release buffer %pK\n", buffer);
|
||||
if (msm_secure_v2_is_supported())
|
||||
ret = msm_unsecure_table(info->table);
|
||||
atomic_sub(buffer->size, &sheap->total_allocated);
|
||||
if (atomic_read(&sheap->total_allocated) < 0) {
|
||||
WARN(1, "no memory is allocated from this pool\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* release memory */
|
||||
if (ret) {
|
||||
WARN(1, "Unsecure failed, can't free the memory. Leaking it!");
|
||||
atomic_add(buffer->size, &sheap->total_leaked);
|
||||
}
|
||||
|
||||
__ion_secure_cma_free(sheap, info, ret ? false : true);
|
||||
}
|
||||
|
||||
static int ion_secure_cma_mmap(struct ion_heap *mapper,
|
||||
struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
pr_info("%s: mmaping from secure heap %s disallowed\n",
|
||||
__func__, mapper->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
pr_info("%s: kernel mapping from secure heap %s disallowed\n",
|
||||
__func__, heap->name);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
}
|
||||
|
||||
static struct ion_heap_ops ion_secure_cma_ops = {
|
||||
.allocate = ion_secure_cma_allocate,
|
||||
.free = ion_secure_cma_free,
|
||||
.map_user = ion_secure_cma_mmap,
|
||||
.map_kernel = ion_secure_cma_map_kernel,
|
||||
.unmap_kernel = ion_secure_cma_unmap_kernel,
|
||||
};
|
||||
|
||||
struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap;
|
||||
int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long);
|
||||
|
||||
sheap = kzalloc(sizeof(*sheap), GFP_KERNEL);
|
||||
if (!sheap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
sheap->dev = data->priv;
|
||||
mutex_init(&sheap->chunk_lock);
|
||||
mutex_init(&sheap->alloc_lock);
|
||||
sheap->heap.ops = &ion_secure_cma_ops;
|
||||
sheap->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA;
|
||||
sheap->npages = data->size >> PAGE_SHIFT;
|
||||
sheap->base = data->base;
|
||||
sheap->heap_size = data->size;
|
||||
sheap->bitmap = kmalloc(map_size, GFP_KERNEL);
|
||||
INIT_LIST_HEAD(&sheap->chunks);
|
||||
INIT_WORK(&sheap->work, ion_secure_pool_pages);
|
||||
sheap->shrinker.seeks = DEFAULT_SEEKS;
|
||||
sheap->shrinker.batch = 0;
|
||||
sheap->shrinker.scan_objects = ion_secure_cma_shrinker;
|
||||
sheap->shrinker.count_objects = ion_secure_cma_shrinker_count;
|
||||
sheap->default_prefetch_size = sheap->heap_size;
|
||||
register_shrinker(&sheap->shrinker);
|
||||
|
||||
if (!sheap->bitmap) {
|
||||
kfree(sheap);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/*
|
||||
 * Initially mark the whole bitmap as in use; regions only become
 * available for allocation once backing chunks are added to the pool
 * (which clears the corresponding bits).
|
||||
*/
|
||||
bitmap_fill(sheap->bitmap, sheap->npages);
|
||||
|
||||
return &sheap->heap;
|
||||
}
|
||||
|
||||
void ion_secure_cma_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_cma_secure_heap *sheap =
|
||||
container_of(heap, struct ion_cma_secure_heap, heap);
|
||||
|
||||
kfree(sheap);
|
||||
}
|
|
@ -307,3 +307,98 @@ int ion_heap_init_shrinker(struct ion_heap *heap)
|
|||
|
||||
return register_shrinker(&heap->shrinker);
|
||||
}
|
||||
|
||||
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
|
||||
{
|
||||
struct ion_heap *heap = NULL;
|
||||
int heap_type = heap_data->type;
|
||||
|
||||
switch (heap_type) {
|
||||
case ION_HEAP_TYPE_SYSTEM_CONTIG:
|
||||
pr_err("%s: Heap type is disabled: %d\n", __func__,
|
||||
heap_data->type);
|
||||
break;
|
||||
case ION_HEAP_TYPE_SYSTEM:
|
||||
heap = ion_system_heap_create(heap_data);
|
||||
break;
|
||||
case ION_HEAP_TYPE_CARVEOUT:
|
||||
heap = ion_carveout_heap_create(heap_data);
|
||||
break;
|
||||
case ION_HEAP_TYPE_CHUNK:
|
||||
heap = ion_chunk_heap_create(heap_data);
|
||||
break;
|
||||
#ifdef CONFIG_CMA
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA:
|
||||
heap = ion_secure_cma_heap_create(heap_data);
|
||||
break;
|
||||
case ION_HEAP_TYPE_DMA:
|
||||
heap = ion_cma_heap_create(heap_data);
|
||||
break;
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA:
|
||||
heap = ion_cma_secure_heap_create(heap_data);
|
||||
break;
|
||||
#endif
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
|
||||
heap = ion_system_secure_heap_create(heap_data);
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: Invalid heap type %d\n", __func__,
|
||||
heap_data->type);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (IS_ERR_OR_NULL(heap)) {
|
||||
pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
|
||||
__func__, heap_data->name, heap_data->type,
|
||||
&heap_data->base, heap_data->size);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
heap->name = heap_data->name;
|
||||
heap->id = heap_data->id;
|
||||
heap->priv = heap_data->priv;
|
||||
return heap;
|
||||
}
|
||||
EXPORT_SYMBOL(ion_heap_create);
|
||||
|
||||
void ion_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
int heap_type;
|
||||
|
||||
if (!heap)
|
||||
return;
|
||||
|
||||
heap_type = heap->type;
|
||||
switch (heap_type) {
|
||||
case ION_HEAP_TYPE_SYSTEM_CONTIG:
|
||||
ion_system_contig_heap_destroy(heap);
|
||||
break;
|
||||
case ION_HEAP_TYPE_SYSTEM:
|
||||
ion_system_heap_destroy(heap);
|
||||
break;
|
||||
case ION_HEAP_TYPE_CARVEOUT:
|
||||
ion_carveout_heap_destroy(heap);
|
||||
break;
|
||||
case ION_HEAP_TYPE_CHUNK:
|
||||
ion_chunk_heap_destroy(heap);
|
||||
break;
|
||||
#ifdef CONFIG_CMA
|
||||
case ION_HEAP_TYPE_SECURE_DMA:
|
||||
ion_secure_cma_heap_destroy(heap);
|
||||
break;
|
||||
case ION_HEAP_TYPE_DMA:
|
||||
ion_cma_heap_destroy(heap);
|
||||
break;
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA:
|
||||
ion_cma_secure_heap_destroy(heap);
|
||||
break;
|
||||
#endif
|
||||
case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
|
||||
ion_system_secure_heap_destroy(heap);
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: Invalid heap type %d\n", __func__,
|
||||
heap->type);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ion_heap_destroy);
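/*
 * Hedged usage sketch (not taken from this patch): how a caller with a
 * populated struct ion_platform_heap might use the factory pair above,
 * together with the two-argument ion_device_add_heap() from this series.
 * "heap_data" and "idev" (the caller's ion_device) are placeholder names.
 *
 *	struct ion_heap *heap;
 *
 *	heap = ion_heap_create(heap_data);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 *	...
 *	ion_heap_destroy(heap);
 */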
|
||||
|
|
30
drivers/staging/android/ion/ion_kernel.h
Normal file
|
@ -0,0 +1,30 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _ION_KERNEL_H
#define _ION_KERNEL_H

#include <linux/dma-buf.h>
#include "../uapi/ion.h"

#ifdef CONFIG_ION

/*
 * Allocates an ion buffer.
 * Use IS_ERR on returned pointer to check for success.
 */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
			  unsigned int flags);

#else

static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
					unsigned int flags)
{
	return ERR_PTR(-ENOMEM);
}

#endif /* CONFIG_ION */
#endif /* _ION_KERNEL_H */
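/*
 * Hedged usage sketch (not part of this header): a kernel client allocating
 * one megabyte through the in-kernel API above. The heap id and flag names
 * are assumed from the msm/uapi ion headers in this tree.
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = ion_alloc(SZ_1M, 1 << ION_SYSTEM_HEAP_ID, ION_FLAG_CACHED);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	...
 *	dma_buf_put(dmabuf);
 */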
|
|
@ -58,33 +58,58 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
|
|||
return page;
|
||||
}
|
||||
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
|
||||
struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
|
||||
BUG_ON(!pool);
|
||||
|
||||
mutex_lock(&pool->mutex);
|
||||
if (pool->high_count)
|
||||
page = ion_page_pool_remove(pool, true);
|
||||
else if (pool->low_count)
|
||||
page = ion_page_pool_remove(pool, false);
|
||||
mutex_unlock(&pool->mutex);
|
||||
|
||||
if (!page)
|
||||
if (*from_pool && mutex_trylock(&pool->mutex)) {
|
||||
if (pool->high_count)
|
||||
page = ion_page_pool_remove(pool, true);
|
||||
else if (pool->low_count)
|
||||
page = ion_page_pool_remove(pool, false);
|
||||
mutex_unlock(&pool->mutex);
|
||||
}
|
||||
if (!page) {
|
||||
page = ion_page_pool_alloc_pages(pool);
|
||||
*from_pool = false;
|
||||
}
|
||||
return page;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tries to allocate from only the specified Pool and returns NULL otherwise
|
||||
*/
|
||||
struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
|
||||
if (!pool)
|
||||
return NULL;
|
||||
|
||||
if (mutex_trylock(&pool->mutex)) {
|
||||
if (pool->high_count)
|
||||
page = ion_page_pool_remove(pool, true);
|
||||
else if (pool->low_count)
|
||||
page = ion_page_pool_remove(pool, false);
|
||||
mutex_unlock(&pool->mutex);
|
||||
}
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
|
||||
{
|
||||
BUG_ON(pool->order != compound_order(page));
|
||||
|
||||
ion_page_pool_add(pool, page);
|
||||
}
|
||||
|
||||
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
|
||||
void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
|
||||
{
|
||||
ion_page_pool_free_pages(pool, page);
|
||||
}
|
||||
|
||||
int ion_page_pool_total(struct ion_page_pool *pool, bool high)
|
||||
{
|
||||
int count = pool->low_count;
|
||||
|
||||
|
@ -128,7 +153,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
|||
return freed;
|
||||
}
|
||||
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
|
||||
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
|
||||
bool cached)
|
||||
{
|
||||
struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
|
||||
|
||||
|
@ -138,10 +164,12 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
|
|||
pool->low_count = 0;
|
||||
INIT_LIST_HEAD(&pool->low_items);
|
||||
INIT_LIST_HEAD(&pool->high_items);
|
||||
pool->gfp_mask = gfp_mask | __GFP_COMP;
|
||||
pool->gfp_mask = gfp_mask;
|
||||
pool->order = order;
|
||||
mutex_init(&pool->mutex);
|
||||
plist_node_init(&pool->list, order);
|
||||
if (cached)
|
||||
pool->cached = true;
|
||||
|
||||
return pool;
|
||||
}
|
||||
|
|
231
drivers/staging/android/ion/ion_secure_util.c
Normal file
|
@ -0,0 +1,231 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include "ion_secure_util.h"
|
||||
#include "ion.h"
|
||||
|
||||
bool is_secure_vmid_valid(int vmid)
|
||||
{
|
||||
return (vmid == VMID_CP_TOUCH ||
|
||||
vmid == VMID_CP_BITSTREAM ||
|
||||
vmid == VMID_CP_PIXEL ||
|
||||
vmid == VMID_CP_NON_PIXEL ||
|
||||
vmid == VMID_CP_CAMERA ||
|
||||
vmid == VMID_CP_SEC_DISPLAY ||
|
||||
vmid == VMID_CP_APP ||
|
||||
vmid == VMID_CP_CAMERA_PREVIEW ||
|
||||
vmid == VMID_CP_SPSS_SP ||
|
||||
vmid == VMID_CP_SPSS_SP_SHARED ||
|
||||
vmid == VMID_CP_SPSS_HLOS_SHARED);
|
||||
}
|
||||
|
||||
int get_secure_vmid(unsigned long flags)
|
||||
{
|
||||
if (flags & ION_FLAG_CP_TOUCH)
|
||||
return VMID_CP_TOUCH;
|
||||
if (flags & ION_FLAG_CP_BITSTREAM)
|
||||
return VMID_CP_BITSTREAM;
|
||||
if (flags & ION_FLAG_CP_PIXEL)
|
||||
return VMID_CP_PIXEL;
|
||||
if (flags & ION_FLAG_CP_NON_PIXEL)
|
||||
return VMID_CP_NON_PIXEL;
|
||||
if (flags & ION_FLAG_CP_CAMERA)
|
||||
return VMID_CP_CAMERA;
|
||||
if (flags & ION_FLAG_CP_SEC_DISPLAY)
|
||||
return VMID_CP_SEC_DISPLAY;
|
||||
if (flags & ION_FLAG_CP_APP)
|
||||
return VMID_CP_APP;
|
||||
if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
|
||||
return VMID_CP_CAMERA_PREVIEW;
|
||||
if (flags & ION_FLAG_CP_SPSS_SP)
|
||||
return VMID_CP_SPSS_SP;
|
||||
if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
|
||||
return VMID_CP_SPSS_SP_SHARED;
|
||||
if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
|
||||
return VMID_CP_SPSS_HLOS_SHARED;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static unsigned int count_set_bits(unsigned long val)
|
||||
{
|
||||
return ((unsigned int)bitmap_weight(&val, BITS_PER_LONG));
|
||||
}
|
||||
|
||||
static int get_vmid(unsigned long flags)
|
||||
{
|
||||
int vmid;
|
||||
|
||||
vmid = get_secure_vmid(flags);
|
||||
if (vmid < 0) {
|
||||
if (flags & ION_FLAG_CP_HLOS)
|
||||
vmid = VMID_HLOS;
|
||||
}
|
||||
return vmid;
|
||||
}
|
||||
|
||||
static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
|
||||
int nelems)
|
||||
{
|
||||
unsigned int itr = 0;
|
||||
int vmid;
|
||||
|
||||
flags = flags & ION_FLAGS_CP_MASK;
|
||||
for_each_set_bit(itr, &flags, BITS_PER_LONG) {
|
||||
vmid = get_vmid(0x1UL << itr);
|
||||
if (vmid < 0 || !nelems)
|
||||
return -EINVAL;
|
||||
|
||||
vm_list[nelems - 1] = vmid;
|
||||
nelems--;
|
||||
}
|
||||
return 0;
|
||||
}
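/*
 * Worked example (illustrative): with exactly two CP flag bits set and
 * nelems == 2, the loop above writes the VMID for the lower-numbered flag
 * bit into vm_list[1] and the VMID for the higher-numbered bit into
 * vm_list[0], i.e. the array is filled from the back in ascending bit order.
 */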
|
||||
|
||||
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
|
||||
int source_nelems, bool clear_page_private)
|
||||
{
|
||||
u32 dest_vmid = VMID_HLOS;
|
||||
u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
|
||||
struct scatterlist *sg;
|
||||
int ret, i;
|
||||
|
||||
if (source_nelems <= 0) {
|
||||
pr_err("%s: source_nelems invalid\n",
|
||||
__func__);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
|
||||
&dest_vmid, &dest_perms, 1);
|
||||
if (ret) {
|
||||
pr_err("%s: Unassign call failed.\n",
|
||||
__func__);
|
||||
goto out;
|
||||
}
|
||||
if (clear_page_private)
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, i)
|
||||
ClearPagePrivate(sg_page(sg));
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
|
||||
int dest_nelems, bool set_page_private)
|
||||
{
|
||||
u32 source_vmid = VMID_HLOS;
|
||||
struct scatterlist *sg;
|
||||
int *dest_perms;
|
||||
int i;
|
||||
int ret = 0;
|
||||
|
||||
if (dest_nelems <= 0) {
|
||||
pr_err("%s: dest_nelems invalid\n",
|
||||
__func__);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dest_perms = kcalloc(dest_nelems, sizeof(*dest_perms), GFP_KERNEL);
|
||||
if (!dest_perms) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < dest_nelems; i++) {
|
||||
if (dest_vm_list[i] == VMID_CP_SEC_DISPLAY)
|
||||
dest_perms[i] = PERM_READ;
|
||||
else
|
||||
dest_perms[i] = PERM_READ | PERM_WRITE;
|
||||
}
|
||||
|
||||
ret = hyp_assign_table(sgt, &source_vmid, 1,
|
||||
dest_vm_list, dest_perms, dest_nelems);
|
||||
|
||||
if (ret) {
|
||||
pr_err("%s: Assign call failed\n",
|
||||
__func__);
|
||||
goto out_free_dest;
|
||||
}
|
||||
if (set_page_private)
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, i)
|
||||
SetPagePrivate(sg_page(sg));
|
||||
|
||||
out_free_dest:
|
||||
kfree(dest_perms);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
|
||||
bool set_page_private)
|
||||
{
|
||||
int ret = 0;
|
||||
int *source_vm_list;
|
||||
int source_nelems;
|
||||
|
||||
source_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
|
||||
source_vm_list = kcalloc(source_nelems, sizeof(*source_vm_list),
|
||||
GFP_KERNEL);
|
||||
if (!source_vm_list)
|
||||
return -ENOMEM;
|
||||
ret = populate_vm_list(flags, source_vm_list, source_nelems);
|
||||
if (ret) {
|
||||
pr_err("%s: Failed to get secure vmids\n", __func__);
|
||||
goto out_free_source;
|
||||
}
|
||||
|
||||
ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
|
||||
set_page_private);
|
||||
|
||||
out_free_source:
|
||||
kfree(source_vm_list);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
|
||||
bool set_page_private)
|
||||
{
|
||||
int ret = 0;
|
||||
int *dest_vm_list = NULL;
|
||||
int dest_nelems;
|
||||
|
||||
dest_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
|
||||
dest_vm_list = kcalloc(dest_nelems, sizeof(*dest_vm_list), GFP_KERNEL);
|
||||
if (!dest_vm_list) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = populate_vm_list(flags, dest_vm_list, dest_nelems);
|
||||
if (ret) {
|
||||
pr_err("%s: Failed to get secure vmid(s)\n", __func__);
|
||||
goto out_free_dest_vm;
|
||||
}
|
||||
|
||||
ret = ion_hyp_assign_sg(sgt, dest_vm_list, dest_nelems,
|
||||
set_page_private);
|
||||
|
||||
out_free_dest_vm:
|
||||
kfree(dest_vm_list);
|
||||
out:
|
||||
return ret;
|
||||
}
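/*
 * Descriptive note for the helper below: a buffer is considered HLOS
 * accessible unless it carries a secure flag (ION_FLAG_SECURE or a valid
 * secure VMID) without also being shared back to HLOS via ION_FLAG_CP_HLOS
 * or ION_FLAG_CP_SPSS_HLOS_SHARED.
 */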
|
||||
|
||||
bool hlos_accessible_buffer(struct ion_buffer *buffer)
|
||||
{
|
||||
if ((buffer->flags & ION_FLAG_SECURE) &&
|
||||
!(buffer->flags & ION_FLAG_CP_HLOS) &&
|
||||
!(buffer->flags & ION_FLAG_CP_SPSS_HLOS_SHARED))
|
||||
return false;
|
||||
else if ((get_secure_vmid(buffer->flags) > 0) &&
|
||||
!(buffer->flags & ION_FLAG_CP_HLOS) &&
|
||||
!(buffer->flags & ION_FLAG_CP_SPSS_HLOS_SHARED))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
24
drivers/staging/android/ion/ion_secure_util.h
Normal file
|
@ -0,0 +1,24 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

#include "ion.h"

#ifndef _ION_SECURE_UTIL_H
#define _ION_SECURE_UTIL_H

int get_secure_vmid(unsigned long flags);
bool is_secure_vmid_valid(int vmid);
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
		      int dest_nelems, bool set_page_private);
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
			int source_nelems, bool clear_page_private);
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
				   bool set_page_private);
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
				 bool set_page_private);

bool hlos_accessible_buffer(struct ion_buffer *buffer);

#endif /* _ION_SECURE_UTIL_H */
|
|
@ -3,6 +3,8 @@
|
|||
* drivers/staging/android/ion/ion_system_heap.c
|
||||
*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/page.h>
|
||||
|
@ -14,16 +16,18 @@
|
|||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include "ion_system_heap.h"
|
||||
#include "ion.h"
|
||||
|
||||
#define NUM_ORDERS ARRAY_SIZE(orders)
|
||||
#include "ion_system_heap.h"
|
||||
#include "ion_system_secure_heap.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
|
||||
__GFP_NORETRY) & ~__GFP_RECLAIM;
|
||||
static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
|
||||
static const unsigned int orders[] = {8, 4, 0};
|
||||
|
||||
static int order_to_index(unsigned int order)
|
||||
int order_to_index(unsigned int order)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -39,44 +43,119 @@ static inline unsigned int order_to_size(int order)
|
|||
return PAGE_SIZE << order;
|
||||
}
|
||||
|
||||
struct ion_system_heap {
|
||||
struct ion_heap heap;
|
||||
struct ion_page_pool *pools[NUM_ORDERS];
|
||||
struct pages_mem {
|
||||
struct page **pages;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long order)
|
||||
{
|
||||
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
|
||||
|
||||
return ion_page_pool_alloc(pool);
|
||||
}
|
||||
|
||||
static void free_buffer_page(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer, struct page *page)
|
||||
unsigned long order,
|
||||
bool *from_pool)
|
||||
{
|
||||
bool cached = ion_buffer_cached(buffer);
|
||||
struct page *page;
|
||||
struct ion_page_pool *pool;
|
||||
unsigned int order = compound_order(page);
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
struct device *dev = heap->heap.priv;
|
||||
|
||||
/* go to system */
|
||||
if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
|
||||
__free_pages(page, order);
|
||||
return;
|
||||
}
|
||||
if (vmid > 0)
|
||||
pool = heap->secure_pools[vmid][order_to_index(order)];
|
||||
else if (!cached)
|
||||
pool = heap->uncached_pools[order_to_index(order)];
|
||||
else
|
||||
pool = heap->cached_pools[order_to_index(order)];
|
||||
|
||||
pool = heap->pools[order_to_index(order)];
|
||||
page = ion_page_pool_alloc(pool, from_pool);
|
||||
|
||||
ion_page_pool_free(pool, page);
|
||||
if (!page)
|
||||
return 0;
|
||||
|
||||
if ((MAKE_ION_ALLOC_DMA_READY && vmid <= 0) || !(*from_pool))
|
||||
ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
static struct page *alloc_largest_available(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned int max_order)
|
||||
/*
|
||||
 * For secure pages that need to be freed and not added back to the pool,
 * hyp_unassign should be called before calling this function.
|
||||
*/
|
||||
void free_buffer_page(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer, struct page *page,
|
||||
unsigned int order)
|
||||
{
|
||||
bool cached = ion_buffer_cached(buffer);
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
|
||||
struct ion_page_pool *pool;
|
||||
|
||||
if (vmid > 0)
|
||||
pool = heap->secure_pools[vmid][order_to_index(order)];
|
||||
else if (cached)
|
||||
pool = heap->cached_pools[order_to_index(order)];
|
||||
else
|
||||
pool = heap->uncached_pools[order_to_index(order)];
|
||||
|
||||
if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
|
||||
ion_page_pool_free_immediate(pool, page);
|
||||
else
|
||||
ion_page_pool_free(pool, page);
|
||||
} else {
|
||||
__free_pages(page, order);
|
||||
}
|
||||
}
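To make the ordering constraint above concrete, a minimal sketch of a caller releasing a secure buffer is shown here; it mirrors what ion_system_heap_free() does later in this file. The helper name example_free_secure_buffer_pages() is illustrative only and not part of the driver.

static void example_free_secure_buffer_pages(struct ion_system_heap *sys_heap,
					     struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int vmid = get_secure_vmid(buffer->flags);
	int i;

	/* Step 1: hand the pages back to HLOS before touching any pool. */
	if (vmid > 0 && ion_hyp_unassign_sg(table, &vmid, 1, true))
		return;	/* unassign failed: ownership unknown, do not recycle */

	/* Step 2: only now may each page be recycled via free_buffer_page(). */
	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
}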
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned int max_order)
|
||||
{
|
||||
struct page *page;
|
||||
struct page_info *info;
|
||||
int i;
|
||||
bool from_pool;
|
||||
|
||||
info = kmalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
if (size < order_to_size(orders[i]))
|
||||
continue;
|
||||
if (max_order < orders[i])
|
||||
continue;
|
||||
from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
|
||||
page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
|
||||
if (!page)
|
||||
continue;
|
||||
|
||||
info->page = page;
|
||||
info->order = orders[i];
|
||||
info->from_pool = from_pool;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
return info;
|
||||
}
|
||||
kfree(info);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct page_info *
|
||||
alloc_from_pool_preferred(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned int max_order)
|
||||
{
|
||||
struct page *page;
|
||||
struct page_info *info;
|
||||
int i;
|
||||
|
||||
info = kmalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
if (size < order_to_size(orders[i]))
|
||||
|
@ -84,14 +163,88 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
|
|||
if (max_order < orders[i])
|
||||
continue;
|
||||
|
||||
page = alloc_buffer_page(heap, buffer, orders[i]);
|
||||
page = alloc_from_secure_pool_order(heap, buffer, orders[i]);
|
||||
if (!page)
|
||||
continue;
|
||||
|
||||
return page;
|
||||
info->page = page;
|
||||
info->order = orders[i];
|
||||
info->from_pool = true;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
return info;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
page = split_page_from_secure_pool(heap, buffer);
|
||||
if (page) {
|
||||
info->page = page;
|
||||
info->order = 0;
|
||||
info->from_pool = true;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
return info;
|
||||
}
|
||||
|
||||
kfree(info);
|
||||
return alloc_largest_available(heap, buffer, size, max_order);
|
||||
}
|
||||
|
||||
static unsigned int process_info(struct page_info *info,
|
||||
struct scatterlist *sg,
|
||||
struct scatterlist *sg_sync,
|
||||
struct pages_mem *data, unsigned int i)
|
||||
{
|
||||
struct page *page = info->page;
|
||||
unsigned int j;
|
||||
|
||||
if (sg_sync) {
|
||||
sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
|
||||
sg_dma_address(sg_sync) = page_to_phys(page);
|
||||
}
|
||||
sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
|
||||
/*
 * This is not correct - sg_dma_address needs a dma_addr_t
 * that is valid for the targeted device, but this works
 * on the currently targeted hardware.
 */
|
||||
sg_dma_address(sg) = page_to_phys(page);
|
||||
if (data) {
|
||||
for (j = 0; j < (1 << info->order); ++j)
|
||||
data->pages[i++] = nth_page(page, j);
|
||||
}
|
||||
list_del(&info->list);
|
||||
kfree(info);
|
||||
return i;
|
||||
}
|
||||
|
||||
static int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
|
||||
{
|
||||
struct page **pages;
|
||||
unsigned int page_tbl_size;
|
||||
|
||||
page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
|
||||
if (page_tbl_size > SZ_8K) {
|
||||
/*
 * Fall back to vmalloc to balance performance and
 * availability.
 */
|
||||
pages = kmalloc(page_tbl_size,
|
||||
__GFP_COMP | __GFP_NORETRY |
|
||||
__GFP_NOWARN);
|
||||
if (!pages)
|
||||
pages = vmalloc(page_tbl_size);
|
||||
} else {
|
||||
pages = kmalloc(page_tbl_size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
pages_mem->pages = pages;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
|
||||
{
|
||||
kvfree(pages_mem->pages);
|
||||
}
|
||||
|
||||
static int ion_system_heap_allocate(struct ion_heap *heap,
|
||||
|
@ -103,79 +256,184 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
|
|||
struct ion_system_heap,
|
||||
heap);
|
||||
struct sg_table *table;
|
||||
struct sg_table table_sync = {0};
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sg_sync;
|
||||
int ret;
|
||||
struct list_head pages;
|
||||
struct page *page, *tmp_page;
|
||||
struct list_head pages_from_pool;
|
||||
struct page_info *info, *tmp_info;
|
||||
int i = 0;
|
||||
unsigned int nents_sync = 0;
|
||||
unsigned long size_remaining = PAGE_ALIGN(size);
|
||||
unsigned int max_order = orders[0];
|
||||
struct pages_mem data;
|
||||
unsigned int sz;
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
|
||||
if (size / PAGE_SIZE > totalram_pages / 2)
|
||||
return -ENOMEM;
|
||||
|
||||
data.size = 0;
|
||||
INIT_LIST_HEAD(&pages);
|
||||
INIT_LIST_HEAD(&pages_from_pool);
|
||||
|
||||
while (size_remaining > 0) {
|
||||
page = alloc_largest_available(sys_heap, buffer, size_remaining,
|
||||
max_order);
|
||||
if (!page)
|
||||
goto free_pages;
|
||||
list_add_tail(&page->lru, &pages);
|
||||
size_remaining -= PAGE_SIZE << compound_order(page);
|
||||
max_order = compound_order(page);
|
||||
if (is_secure_vmid_valid(vmid))
|
||||
info = alloc_from_pool_preferred(sys_heap, buffer,
|
||||
size_remaining,
|
||||
max_order);
|
||||
else
|
||||
info = alloc_largest_available(sys_heap, buffer,
|
||||
size_remaining,
|
||||
max_order);
|
||||
|
||||
if (!info)
|
||||
goto err;
|
||||
|
||||
sz = (1 << info->order) * PAGE_SIZE;
|
||||
|
||||
if (info->from_pool) {
|
||||
list_add_tail(&info->list, &pages_from_pool);
|
||||
} else {
|
||||
list_add_tail(&info->list, &pages);
|
||||
data.size += sz;
|
||||
++nents_sync;
|
||||
}
|
||||
size_remaining -= sz;
|
||||
max_order = info->order;
|
||||
i++;
|
||||
}
|
||||
table = kmalloc(sizeof(*table), GFP_KERNEL);
|
||||
|
||||
ret = ion_heap_alloc_pages_mem(&data);
|
||||
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
table = kzalloc(sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
goto free_pages;
|
||||
goto err_free_data_pages;
|
||||
|
||||
if (sg_alloc_table(table, i, GFP_KERNEL))
|
||||
goto free_table;
|
||||
ret = sg_alloc_table(table, i, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err1;
|
||||
|
||||
if (nents_sync) {
|
||||
ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err_free_sg;
|
||||
}
|
||||
|
||||
i = 0;
|
||||
sg = table->sgl;
|
||||
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
|
||||
sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
|
||||
sg_sync = table_sync.sgl;
|
||||
|
||||
/*
 * We now have two separate lists. One list contains pages from the
 * pool and the other pages from buddy. We want to merge these
 * together while preserving the ordering of the pages (higher order
 * first).
 */
|
||||
do {
|
||||
info = list_first_entry_or_null(&pages, struct page_info, list);
|
||||
tmp_info = list_first_entry_or_null(&pages_from_pool,
|
||||
struct page_info, list);
|
||||
if (info && tmp_info) {
|
||||
if (info->order >= tmp_info->order) {
|
||||
i = process_info(info, sg, sg_sync, &data, i);
|
||||
sg_sync = sg_next(sg_sync);
|
||||
} else {
|
||||
i = process_info(tmp_info, sg, 0, 0, i);
|
||||
}
|
||||
} else if (info) {
|
||||
i = process_info(info, sg, sg_sync, &data, i);
|
||||
sg_sync = sg_next(sg_sync);
|
||||
} else if (tmp_info) {
|
||||
i = process_info(tmp_info, sg, 0, 0, i);
|
||||
}
|
||||
sg = sg_next(sg);
|
||||
list_del(&page->lru);
|
||||
|
||||
} while (sg);
|
||||
|
||||
if (nents_sync) {
|
||||
if (vmid > 0) {
|
||||
ret = ion_hyp_assign_sg(&table_sync, &vmid, 1, true);
|
||||
if (ret)
|
||||
goto err_free_sg2;
|
||||
}
|
||||
}
|
||||
|
||||
buffer->sg_table = table;
|
||||
if (nents_sync)
|
||||
sg_free_table(&table_sync);
|
||||
ion_heap_free_pages_mem(&data);
|
||||
return 0;
|
||||
|
||||
free_table:
|
||||
err_free_sg2:
|
||||
/* We failed to zero buffers. Bypass pool */
|
||||
buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
|
||||
|
||||
if (vmid > 0)
|
||||
ion_hyp_unassign_sg(table, &vmid, 1, true);
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i)
|
||||
free_buffer_page(sys_heap, buffer, sg_page(sg),
|
||||
get_order(sg->length));
|
||||
if (nents_sync)
|
||||
sg_free_table(&table_sync);
|
||||
err_free_sg:
|
||||
sg_free_table(table);
|
||||
err1:
|
||||
kfree(table);
|
||||
free_pages:
|
||||
list_for_each_entry_safe(page, tmp_page, &pages, lru)
|
||||
free_buffer_page(sys_heap, buffer, page);
|
||||
err_free_data_pages:
|
||||
ion_heap_free_pages_mem(&data);
|
||||
err:
|
||||
list_for_each_entry_safe(info, tmp_info, &pages, list) {
|
||||
free_buffer_page(sys_heap, buffer, info->page, info->order);
|
||||
kfree(info);
|
||||
}
|
||||
list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
|
||||
free_buffer_page(sys_heap, buffer, info->page, info->order);
|
||||
kfree(info);
|
||||
}
|
||||
return -ENOMEM;
|
||||
}
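The merge loop above interleaves the buddy-allocated list and the pool list so that the resulting scatterlist stays sorted by descending page order. A stripped-down sketch of that selection policy follows; pick_next_page_info() is illustrative only and not a helper in this driver.

static struct page_info *pick_next_page_info(struct list_head *pages,
					     struct list_head *pages_from_pool)
{
	struct page_info *buddy, *pool;

	buddy = list_first_entry_or_null(pages, struct page_info, list);
	pool = list_first_entry_or_null(pages_from_pool, struct page_info, list);

	if (buddy && pool)
		return buddy->order >= pool->order ? buddy : pool;
	return buddy ? buddy : pool;	/* whichever list still has entries */
}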
static void ion_system_heap_free(struct ion_buffer *buffer)
|
||||
void ion_system_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_system_heap *sys_heap = container_of(buffer->heap,
|
||||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_system_heap *sys_heap = container_of(heap,
|
||||
struct ion_system_heap,
|
||||
heap);
|
||||
struct sg_table *table = buffer->sg_table;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
|
||||
/* zero the buffer before goto page pool */
|
||||
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
|
||||
ion_heap_buffer_zero(buffer);
|
||||
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
|
||||
!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
|
||||
if (vmid < 0)
|
||||
ion_heap_buffer_zero(buffer);
|
||||
} else if (vmid > 0) {
|
||||
if (ion_hyp_unassign_sg(table, &vmid, 1, true))
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i)
|
||||
free_buffer_page(sys_heap, buffer, sg_page(sg));
|
||||
free_buffer_page(sys_heap, buffer, sg_page(sg),
|
||||
get_order(sg->length));
|
||||
sg_free_table(table);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
|
||||
int nr_to_scan)
|
||||
int nr_to_scan)
|
||||
{
|
||||
struct ion_page_pool *pool;
|
||||
struct ion_system_heap *sys_heap;
|
||||
int nr_total = 0;
|
||||
int i, nr_freed;
|
||||
int i, j, nr_freed = 0;
|
||||
int only_scan = 0;
|
||||
struct ion_page_pool *pool;
|
||||
|
||||
sys_heap = container_of(heap, struct ion_system_heap, heap);
|
||||
|
||||
|
@ -183,23 +441,31 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
|
|||
only_scan = 1;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->pools[i];
|
||||
nr_freed = 0;
|
||||
|
||||
if (only_scan) {
|
||||
nr_total += ion_page_pool_shrink(pool,
|
||||
gfp_mask,
|
||||
nr_to_scan);
|
||||
for (j = 0; j < VMID_LAST; j++) {
|
||||
if (is_secure_vmid_valid(j))
|
||||
nr_freed +=
|
||||
ion_secure_page_pool_shrink(sys_heap,
|
||||
j, i,
|
||||
nr_to_scan);
|
||||
}
|
||||
|
||||
} else {
|
||||
nr_freed = ion_page_pool_shrink(pool,
|
||||
gfp_mask,
|
||||
nr_to_scan);
|
||||
pool = sys_heap->uncached_pools[i];
|
||||
nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
|
||||
|
||||
pool = sys_heap->cached_pools[i];
|
||||
nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
|
||||
nr_total += nr_freed;
|
||||
|
||||
if (!only_scan) {
|
||||
nr_to_scan -= nr_freed;
|
||||
nr_total += nr_freed;
|
||||
/* shrink completed */
|
||||
if (nr_to_scan <= 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return nr_total;
|
||||
}
|
||||
|
||||
|
@ -215,21 +481,97 @@ static struct ion_heap_ops system_heap_ops = {
|
|||
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
|
||||
void *unused)
|
||||
{
|
||||
struct ion_system_heap *sys_heap = container_of(heap,
|
||||
struct ion_system_heap,
|
||||
heap);
|
||||
int i;
|
||||
struct ion_system_heap *sys_heap;
|
||||
bool use_seq = s;
|
||||
unsigned long uncached_total = 0;
|
||||
unsigned long cached_total = 0;
|
||||
unsigned long secure_total = 0;
|
||||
struct ion_page_pool *pool;
|
||||
int i, j;
|
||||
|
||||
sys_heap = container_of(heap, struct ion_system_heap, heap);
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->uncached_pools[i];
|
||||
if (use_seq) {
|
||||
seq_printf(s,
|
||||
"%d order %u highmem pages in uncached pool = %lu total\n",
|
||||
pool->high_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count);
|
||||
seq_printf(s,
|
||||
"%d order %u lowmem pages in uncached pool = %lu total\n",
|
||||
pool->low_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count);
|
||||
}
|
||||
|
||||
uncached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count;
|
||||
uncached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->pools[i];
|
||||
pool = sys_heap->cached_pools[i];
|
||||
if (use_seq) {
|
||||
seq_printf(s,
|
||||
"%d order %u highmem pages in cached pool = %lu total\n",
|
||||
pool->high_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count);
|
||||
seq_printf(s,
|
||||
"%d order %u lowmem pages in cached pool = %lu total\n",
|
||||
pool->low_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count);
|
||||
}
|
||||
|
||||
seq_printf(s, "%d order %u highmem pages %lu total\n",
|
||||
pool->high_count, pool->order,
|
||||
(PAGE_SIZE << pool->order) * pool->high_count);
|
||||
seq_printf(s, "%d order %u lowmem pages %lu total\n",
|
||||
pool->low_count, pool->order,
|
||||
(PAGE_SIZE << pool->order) * pool->low_count);
|
||||
cached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count;
|
||||
cached_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
for (j = 0; j < VMID_LAST; j++) {
|
||||
if (!is_secure_vmid_valid(j))
|
||||
continue;
|
||||
pool = sys_heap->secure_pools[j][i];
|
||||
|
||||
if (use_seq) {
|
||||
seq_printf(s,
|
||||
"VMID %d: %d order %u highmem pages in secure pool = %lu total\n",
|
||||
j, pool->high_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count);
|
||||
seq_printf(s,
|
||||
"VMID %d: %d order %u lowmem pages in secure pool = %lu total\n",
|
||||
j, pool->low_count, pool->order,
|
||||
(1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count);
|
||||
}
|
||||
|
||||
secure_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->high_count;
|
||||
secure_total += (1 << pool->order) * PAGE_SIZE *
|
||||
pool->low_count;
|
||||
}
|
||||
}
|
||||
|
||||
if (use_seq) {
|
||||
seq_puts(s, "--------------------------------------------\n");
|
||||
seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n",
|
||||
uncached_total, cached_total, secure_total);
|
||||
seq_printf(s, "pool total (uncached + cached + secure) = %lu\n",
|
||||
uncached_total + cached_total + secure_total);
|
||||
seq_puts(s, "--------------------------------------------\n");
|
||||
} else {
|
||||
pr_info("-------------------------------------------------\n");
|
||||
pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n",
|
||||
uncached_total, cached_total, secure_total);
|
||||
pr_info("pool total (uncached + cached + secure) = %lu\n",
|
||||
uncached_total + cached_total + secure_total);
|
||||
pr_info("-------------------------------------------------\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -244,32 +586,38 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
|
|||
ion_page_pool_destroy(pools[i]);
|
||||
}
|
||||
|
||||
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
|
||||
/**
|
||||
* ion_system_heap_create_pools - Creates pools for all orders
|
||||
*
|
||||
* If this fails you don't need to destroy any pools. It's all or
|
||||
* nothing. If it succeeds you'll eventually need to use
|
||||
* ion_system_heap_destroy_pools to destroy the pools.
|
||||
*/
|
||||
static int ion_system_heap_create_pools(struct ion_page_pool **pools,
|
||||
bool cached)
|
||||
{
|
||||
int i;
|
||||
gfp_t gfp_flags = low_order_gfp_flags;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
struct ion_page_pool *pool;
|
||||
gfp_t gfp_flags = low_order_gfp_flags;
|
||||
|
||||
if (orders[i] > 4)
|
||||
if (orders[i])
|
||||
gfp_flags = high_order_gfp_flags;
|
||||
|
||||
pool = ion_page_pool_create(gfp_flags, orders[i]);
|
||||
pool = ion_page_pool_create(gfp_flags, orders[i], cached);
|
||||
if (!pool)
|
||||
goto err_create_pool;
|
||||
pools[i] = pool;
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_create_pool:
|
||||
ion_system_heap_destroy_pools(pools);
|
||||
return -ENOMEM;
|
||||
}
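Because pool creation is all-or-nothing, as the kernel-doc above states, a caller only has to pair a successful create with a later destroy. A minimal usage sketch, with error handling trimmed for illustration:

	struct ion_page_pool *pools[NUM_ORDERS];

	if (ion_system_heap_create_pools(pools, false))
		return -ENOMEM;			/* nothing to clean up */
	/* ... use the pools ... */
	ion_system_heap_destroy_pools(pools);	/* only needed after success */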
static struct ion_heap *__ion_system_heap_create(void)
|
||||
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
|
||||
{
|
||||
struct ion_system_heap *heap;
|
||||
int i;
|
||||
|
||||
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
|
||||
if (!heap)
|
||||
|
@ -278,30 +626,53 @@ static struct ion_heap *__ion_system_heap_create(void)
|
|||
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
|
||||
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
|
||||
|
||||
if (ion_system_heap_create_pools(heap->pools))
|
||||
goto free_heap;
|
||||
for (i = 0; i < VMID_LAST; i++)
|
||||
if (is_secure_vmid_valid(i))
|
||||
if (ion_system_heap_create_pools(heap->secure_pools[i],
|
||||
false))
|
||||
goto destroy_secure_pools;
|
||||
|
||||
if (ion_system_heap_create_pools(heap->uncached_pools, false))
|
||||
goto destroy_secure_pools;
|
||||
|
||||
if (ion_system_heap_create_pools(heap->cached_pools, true))
|
||||
goto destroy_uncached_pools;
|
||||
|
||||
mutex_init(&heap->split_page_mutex);
|
||||
|
||||
heap->heap.debug_show = ion_system_heap_debug_show;
|
||||
return &heap->heap;
|
||||
|
||||
free_heap:
|
||||
destroy_uncached_pools:
|
||||
ion_system_heap_destroy_pools(heap->uncached_pools);
|
||||
destroy_secure_pools:
|
||||
for (i = 0; i < VMID_LAST; i++) {
|
||||
if (heap->secure_pools[i])
|
||||
ion_system_heap_destroy_pools(heap->secure_pools[i]);
|
||||
}
|
||||
kfree(heap);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static int ion_system_heap_create(void)
|
||||
void ion_system_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
struct ion_system_heap *sys_heap = container_of(heap,
|
||||
struct ion_system_heap,
|
||||
heap);
|
||||
int i, j;
|
||||
|
||||
heap = __ion_system_heap_create();
|
||||
if (IS_ERR(heap))
|
||||
return PTR_ERR(heap);
|
||||
heap->name = "ion_system_heap";
|
||||
for (i = 0; i < VMID_LAST; i++) {
|
||||
if (!is_secure_vmid_valid(i))
|
||||
continue;
|
||||
for (j = 0; j < NUM_ORDERS; j++)
|
||||
ion_secure_page_pool_shrink(sys_heap, i, j, UINT_MAX);
|
||||
|
||||
ion_device_add_heap(heap);
|
||||
return 0;
|
||||
ion_system_heap_destroy_pools(sys_heap->secure_pools[i]);
|
||||
}
|
||||
ion_system_heap_destroy_pools(sys_heap->uncached_pools);
|
||||
ion_system_heap_destroy_pools(sys_heap->cached_pools);
|
||||
kfree(sys_heap);
|
||||
}
|
||||
device_initcall(ion_system_heap_create);
|
||||
|
||||
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
|
@ -338,6 +709,8 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
|
|||
|
||||
buffer->sg_table = table;
|
||||
|
||||
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
|
||||
|
||||
return 0;
|
||||
|
||||
free_table:
|
||||
|
@ -370,7 +743,7 @@ static struct ion_heap_ops kmalloc_ops = {
|
|||
.map_user = ion_heap_map_user,
|
||||
};
|
||||
|
||||
static struct ion_heap *__ion_system_contig_heap_create(void)
|
||||
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
|
||||
|
@ -379,20 +752,10 @@ static struct ion_heap *__ion_system_contig_heap_create(void)
|
|||
return ERR_PTR(-ENOMEM);
|
||||
heap->ops = &kmalloc_ops;
|
||||
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
|
||||
heap->name = "ion_system_contig_heap";
|
||||
return heap;
|
||||
}
|
||||
|
||||
static int ion_system_contig_heap_create(void)
|
||||
void ion_system_contig_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_heap *heap;
|
||||
|
||||
heap = __ion_system_contig_heap_create();
|
||||
if (IS_ERR(heap))
|
||||
return PTR_ERR(heap);
|
||||
|
||||
ion_device_add_heap(heap);
|
||||
return 0;
|
||||
kfree(heap);
|
||||
}
|
||||
device_initcall(ion_system_contig_heap_create);
|
||||
|
||||
|
|
45
drivers/staging/android/ion/ion_system_heap.h
Normal file
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#include <soc/qcom/secure_buffer.h>
#include "ion.h"

#ifndef _ION_SYSTEM_HEAP_H
#define _ION_SYSTEM_HEAP_H

#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
#if defined(CONFIG_IOMMU_IO_PGTABLE_ARMV7S)
static const unsigned int orders[] = {8, 4, 0};
#else
static const unsigned int orders[] = {9, 4, 0};
#endif
#else
static const unsigned int orders[] = {0};
#endif

#define NUM_ORDERS ARRAY_SIZE(orders)

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *uncached_pools[MAX_ORDER];
	struct ion_page_pool *cached_pools[MAX_ORDER];
	struct ion_page_pool *secure_pools[VMID_LAST][MAX_ORDER];
	/* Prevents unnecessary page splitting */
	struct mutex split_page_mutex;
};

struct page_info {
	struct page *page;
	bool from_pool;
	unsigned int order;
	struct list_head list;
};

int order_to_index(unsigned int order);

void free_buffer_page(struct ion_system_heap *heap,
		      struct ion_buffer *buffer, struct page *page,
		      unsigned int order);

#endif /* _ION_SYSTEM_HEAP_H */
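order_to_index() is only declared above; for reference, here is a sketch of an implementation consistent with the orders[] table in this header. It is an assumption about the definition in ion_system_heap.c, shown purely for illustration.

int order_to_index(unsigned int order)
{
	int i;

	/* Map an allocation order (e.g. 8, 4 or 0) to its slot in the pools. */
	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();	/* callers must only pass one of the supported orders */
	return -1;
}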
501
drivers/staging/android/ion/ion_system_secure_heap.c
Normal file
@@ -0,0 +1,501 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include "ion_system_secure_heap.h"
|
||||
#include "ion_system_heap.h"
|
||||
#include "ion.h"
|
||||
#include "ion_secure_util.h"
|
||||
|
||||
struct ion_system_secure_heap {
|
||||
struct ion_heap *sys_heap;
|
||||
struct ion_heap heap;
|
||||
|
||||
/* Protects prefetch_list */
|
||||
spinlock_t work_lock;
|
||||
bool destroy_heap;
|
||||
struct list_head prefetch_list;
|
||||
struct delayed_work prefetch_work;
|
||||
};
|
||||
|
||||
struct prefetch_info {
|
||||
struct list_head list;
|
||||
int vmid;
|
||||
u64 size;
|
||||
bool shrink;
|
||||
};
|
||||
|
||||
/*
 * The video client may not hold the last reference count on the
 * ion_buffer(s). Delay for a short time after the video client sends
 * the IOC_DRAIN event to increase the chance that the reference
 * count drops to zero. Time in milliseconds.
 */
|
||||
#define SHRINK_DELAY 1000
|
||||
|
||||
int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
|
||||
{
|
||||
return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
|
||||
}
|
||||
|
||||
static bool is_cp_flag_present(unsigned long flags)
|
||||
{
|
||||
return flags & (ION_FLAG_CP_TOUCH |
|
||||
ION_FLAG_CP_BITSTREAM |
|
||||
ION_FLAG_CP_PIXEL |
|
||||
ION_FLAG_CP_NON_PIXEL |
|
||||
ION_FLAG_CP_CAMERA);
|
||||
}
|
||||
|
||||
static void ion_system_secure_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
struct ion_heap *heap = buffer->heap;
|
||||
struct ion_system_secure_heap *secure_heap = container_of(heap,
|
||||
struct ion_system_secure_heap,
|
||||
heap);
|
||||
buffer->heap = secure_heap->sys_heap;
|
||||
secure_heap->sys_heap->ops->free(buffer);
|
||||
}
|
||||
|
||||
static int ion_system_secure_heap_allocate(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long size,
|
||||
unsigned long flags)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ion_system_secure_heap *secure_heap = container_of(heap,
|
||||
struct ion_system_secure_heap,
|
||||
heap);
|
||||
|
||||
if (!ion_heap_is_system_secure_heap_type(secure_heap->heap.type) ||
|
||||
!is_cp_flag_present(flags)) {
|
||||
pr_info("%s: Incorrect heap type or incorrect flags\n",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = secure_heap->sys_heap->ops->allocate(secure_heap->sys_heap,
|
||||
buffer, size, flags);
|
||||
if (ret) {
|
||||
pr_info("%s: Failed to get allocation for %s, ret = %d\n",
|
||||
__func__, heap->name, ret);
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void process_one_prefetch(struct ion_heap *sys_heap,
|
||||
struct prefetch_info *info)
|
||||
{
|
||||
struct ion_buffer buffer;
|
||||
int ret;
|
||||
int vmid;
|
||||
|
||||
buffer.heap = sys_heap;
|
||||
buffer.flags = 0;
|
||||
|
||||
ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size,
|
||||
buffer.flags);
|
||||
if (ret) {
|
||||
pr_debug("%s: Failed to prefetch 0x%zx, ret = %d\n",
|
||||
__func__, info->size, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
vmid = get_secure_vmid(info->vmid);
|
||||
if (vmid < 0)
|
||||
goto out;
|
||||
|
||||
ret = ion_hyp_assign_sg(buffer.sg_table, &vmid, 1, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* Now free it to the secure heap */
|
||||
buffer.heap = sys_heap;
|
||||
buffer.flags = info->vmid;
|
||||
|
||||
out:
|
||||
sys_heap->ops->free(&buffer);
|
||||
}
|
||||
|
||||
/*
|
||||
* Since no lock is held, results are approximate.
|
||||
*/
|
||||
size_t ion_system_secure_heap_page_pool_total(struct ion_heap *heap,
|
||||
int vmid_flags)
|
||||
{
|
||||
struct ion_system_heap *sys_heap;
|
||||
struct ion_page_pool *pool;
|
||||
size_t total = 0;
|
||||
int vmid, i;
|
||||
|
||||
sys_heap = container_of(heap, struct ion_system_heap, heap);
|
||||
vmid = get_secure_vmid(vmid_flags);
|
||||
if (vmid < 0)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < NUM_ORDERS; i++) {
|
||||
pool = sys_heap->secure_pools[vmid][i];
|
||||
total += ion_page_pool_total(pool, true);
|
||||
}
|
||||
|
||||
return total << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static void process_one_shrink(struct ion_heap *sys_heap,
|
||||
struct prefetch_info *info)
|
||||
{
|
||||
struct ion_buffer buffer;
|
||||
size_t pool_size, size;
|
||||
int ret;
|
||||
|
||||
buffer.heap = sys_heap;
|
||||
buffer.flags = info->vmid;
|
||||
|
||||
pool_size = ion_system_secure_heap_page_pool_total(sys_heap,
|
||||
info->vmid);
|
||||
size = min_t(size_t, pool_size, info->size);
|
||||
ret = sys_heap->ops->allocate(sys_heap, &buffer, size, buffer.flags);
|
||||
if (ret) {
|
||||
pr_debug("%s: Failed to shrink 0x%zx, ret = %d\n",
|
||||
__func__, info->size, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
|
||||
sys_heap->ops->free(&buffer);
|
||||
}
|
||||
|
||||
static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap = container_of(work,
|
||||
struct ion_system_secure_heap,
|
||||
prefetch_work.work);
|
||||
struct ion_heap *sys_heap = secure_heap->sys_heap;
|
||||
struct prefetch_info *info, *tmp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
list_for_each_entry_safe(info, tmp,
|
||||
&secure_heap->prefetch_list, list) {
|
||||
list_del(&info->list);
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
|
||||
if (info->shrink)
|
||||
process_one_shrink(sys_heap, info);
|
||||
else
|
||||
process_one_prefetch(sys_heap, info);
|
||||
|
||||
kfree(info);
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
}
|
||||
|
||||
static int alloc_prefetch_info(struct ion_prefetch_regions __user *
|
||||
user_regions, bool shrink,
|
||||
struct list_head *items)
|
||||
{
|
||||
struct prefetch_info *info;
|
||||
u64 __user *user_sizes;
|
||||
int err;
|
||||
unsigned int nr_sizes, vmid, i;
|
||||
|
||||
err = get_user(nr_sizes, &user_regions->nr_sizes);
|
||||
err |= get_user(user_sizes, &user_regions->sizes);
|
||||
err |= get_user(vmid, &user_regions->vmid);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
if (!is_secure_vmid_valid(get_secure_vmid(vmid)))
|
||||
return -EINVAL;
|
||||
|
||||
if (nr_sizes > 0x10)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < nr_sizes; i++) {
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
err = get_user(info->size, &user_sizes[i]);
|
||||
if (err)
|
||||
goto out_free;
|
||||
|
||||
info->vmid = vmid;
|
||||
info->shrink = shrink;
|
||||
INIT_LIST_HEAD(&info->list);
|
||||
list_add_tail(&info->list, items);
|
||||
}
|
||||
return err;
|
||||
out_free:
|
||||
kfree(info);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
|
||||
bool shrink)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap = container_of(heap,
|
||||
struct ion_system_secure_heap,
|
||||
heap);
|
||||
struct ion_prefetch_data *data = ptr;
|
||||
int i, ret = 0;
|
||||
struct prefetch_info *info, *tmp;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(items);
|
||||
|
||||
if ((int)heap->type != ION_HEAP_TYPE_SYSTEM_SECURE)
|
||||
return -EINVAL;
|
||||
|
||||
if (data->nr_regions > 0x10)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < data->nr_regions; i++) {
|
||||
ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
if (secure_heap->destroy_heap) {
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
goto out_free;
|
||||
}
|
||||
list_splice_init(&items, &secure_heap->prefetch_list);
|
||||
schedule_delayed_work(&secure_heap->prefetch_work,
|
||||
shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
list_for_each_entry_safe(info, tmp, &items, list) {
|
||||
list_del(&info->list);
|
||||
kfree(info);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *ptr)
|
||||
{
|
||||
return __ion_system_secure_heap_resize(heap, ptr, false);
|
||||
}
|
||||
|
||||
int ion_system_secure_heap_drain(struct ion_heap *heap, void *ptr)
|
||||
{
|
||||
return __ion_system_secure_heap_resize(heap, ptr, true);
|
||||
}
|
||||
|
||||
static void *ion_system_secure_heap_map_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
pr_info("%s: Kernel mapping from secure heap %s disallowed\n",
|
||||
__func__, heap->name);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void ion_system_secure_heap_unmap_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
}
|
||||
|
||||
static int ion_system_secure_heap_map_user(struct ion_heap *mapper,
|
||||
struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
pr_info("%s: Mapping from secure heap %s disallowed\n",
|
||||
__func__, mapper->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int ion_system_secure_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
|
||||
int nr_to_scan)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap = container_of(heap,
|
||||
struct ion_system_secure_heap,
|
||||
heap);
|
||||
|
||||
return secure_heap->sys_heap->ops->shrink(secure_heap->sys_heap,
|
||||
gfp_mask, nr_to_scan);
|
||||
}
|
||||
|
||||
static struct ion_heap_ops system_secure_heap_ops = {
|
||||
.allocate = ion_system_secure_heap_allocate,
|
||||
.free = ion_system_secure_heap_free,
|
||||
.map_kernel = ion_system_secure_heap_map_kernel,
|
||||
.unmap_kernel = ion_system_secure_heap_unmap_kernel,
|
||||
.map_user = ion_system_secure_heap_map_user,
|
||||
.shrink = ion_system_secure_heap_shrink,
|
||||
};
|
||||
|
||||
struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *unused)
|
||||
{
|
||||
struct ion_system_secure_heap *heap;
|
||||
|
||||
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
|
||||
if (!heap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
heap->heap.ops = &system_secure_heap_ops;
|
||||
heap->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE;
|
||||
heap->sys_heap = get_ion_heap(ION_SYSTEM_HEAP_ID);
|
||||
|
||||
heap->destroy_heap = false;
|
||||
heap->work_lock = __SPIN_LOCK_UNLOCKED(heap->work_lock);
|
||||
INIT_LIST_HEAD(&heap->prefetch_list);
|
||||
INIT_DELAYED_WORK(&heap->prefetch_work,
|
||||
ion_system_secure_heap_prefetch_work);
|
||||
return &heap->heap;
|
||||
}
|
||||
|
||||
void ion_system_secure_heap_destroy(struct ion_heap *heap)
|
||||
{
|
||||
struct ion_system_secure_heap *secure_heap = container_of(heap,
|
||||
struct ion_system_secure_heap,
|
||||
heap);
|
||||
unsigned long flags;
|
||||
LIST_HEAD(items);
|
||||
struct prefetch_info *info, *tmp;
|
||||
|
||||
/* Stop any pending/future work */
|
||||
spin_lock_irqsave(&secure_heap->work_lock, flags);
|
||||
secure_heap->destroy_heap = true;
|
||||
list_splice_init(&secure_heap->prefetch_list, &items);
|
||||
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
|
||||
|
||||
cancel_delayed_work_sync(&secure_heap->prefetch_work);
|
||||
|
||||
list_for_each_entry_safe(info, tmp, &items, list) {
|
||||
list_del(&info->list);
|
||||
kfree(info);
|
||||
}
|
||||
|
||||
kfree(heap);
|
||||
}
|
||||
|
||||
struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer,
|
||||
unsigned long order)
|
||||
{
|
||||
int vmid = get_secure_vmid(buffer->flags);
|
||||
struct ion_page_pool *pool;
|
||||
|
||||
if (!is_secure_vmid_valid(vmid))
|
||||
return NULL;
|
||||
|
||||
pool = heap->secure_pools[vmid][order_to_index(order)];
|
||||
return ion_page_pool_alloc_pool_only(pool);
|
||||
}
|
||||
|
||||
struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
int i, j;
|
||||
struct page *page;
|
||||
unsigned int order;
|
||||
|
||||
mutex_lock(&heap->split_page_mutex);
|
||||
|
||||
/*
 * Someone may have just split a page and returned the unused portion
 * back to the pool, so try allocating from the pool one more time
 * before splitting. We want to maintain large page sizes when
 * possible.
 */
|
||||
page = alloc_from_secure_pool_order(heap, buffer, 0);
|
||||
if (page)
|
||||
goto got_page;
|
||||
|
||||
for (i = NUM_ORDERS - 2; i >= 0; i--) {
|
||||
order = orders[i];
|
||||
page = alloc_from_secure_pool_order(heap, buffer, order);
|
||||
if (!page)
|
||||
continue;
|
||||
|
||||
split_page(page, order);
|
||||
break;
|
||||
}
|
||||
/*
 * Return the remaining order-0 pages to the pool.
 * Set the PagePrivate flag to mark the memory as secure.
 */
|
||||
if (page) {
|
||||
for (j = 1; j < (1 << order); j++) {
|
||||
SetPagePrivate(page + j);
|
||||
free_buffer_page(heap, buffer, page + j, 0);
|
||||
}
|
||||
}
|
||||
got_page:
|
||||
mutex_unlock(&heap->split_page_mutex);
|
||||
|
||||
return page;
|
||||
}
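As a concrete instance of the split performed above: with orders[] = {8, 4, 0}, taking an order-4 page from the secure pool yields 1 << 4 = 16 independent order-0 pages after split_page(); one satisfies the caller and the other fifteen are re-marked secure and returned to the order-0 pool. An illustrative trace of that case:

	/* order == 4: split into 16 standalone order-0 pages */
	split_page(page, 4);
	for (j = 1; j < 16; j++) {		/* pages 1..15 go back */
		SetPagePrivate(page + j);	/* still secure memory */
		free_buffer_page(heap, buffer, page + j, 0);
	}
	/* page itself (index 0) is returned to satisfy the request */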
int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
|
||||
int vmid, int order_idx, int nr_to_scan)
|
||||
{
|
||||
int ret, freed = 0;
|
||||
int order = orders[order_idx];
|
||||
struct page *page, *tmp;
|
||||
struct sg_table sgt;
|
||||
struct scatterlist *sg;
|
||||
struct ion_page_pool *pool = sys_heap->secure_pools[vmid][order_idx];
|
||||
LIST_HEAD(pages);
|
||||
|
||||
if (nr_to_scan == 0)
|
||||
return ion_page_pool_total(pool, true);
|
||||
|
||||
while (freed < nr_to_scan) {
|
||||
page = ion_page_pool_alloc_pool_only(pool);
|
||||
if (!page)
|
||||
break;
|
||||
list_add(&page->lru, &pages);
|
||||
freed += (1 << order);
|
||||
}
|
||||
|
||||
if (!freed)
|
||||
return freed;
|
||||
|
||||
ret = sg_alloc_table(&sgt, (freed >> order), GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out1;
|
||||
sg = sgt.sgl;
|
||||
list_for_each_entry(page, &pages, lru) {
|
||||
sg_set_page(sg, page, (1 << order) * PAGE_SIZE, 0);
|
||||
sg_dma_address(sg) = page_to_phys(page);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
|
||||
goto out2;
|
||||
|
||||
list_for_each_entry_safe(page, tmp, &pages, lru) {
|
||||
list_del(&page->lru);
|
||||
ion_page_pool_free_immediate(pool, page);
|
||||
}
|
||||
|
||||
sg_free_table(&sgt);
|
||||
return freed;
|
||||
|
||||
out1:
|
||||
/* Restore pages to secure pool */
|
||||
list_for_each_entry_safe(page, tmp, &pages, lru) {
|
||||
list_del(&page->lru);
|
||||
ion_page_pool_free(pool, page);
|
||||
}
|
||||
return 0;
|
||||
out2:
|
||||
/*
 * The security state of the pages is unknown after a failure;
 * they can neither be added back to the secure pool nor to the buddy system.
 */
|
||||
sg_free_table(&sgt);
|
||||
return 0;
|
||||
}
|
24
drivers/staging/android/ion/ion_system_secure_heap.h
Normal file
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#include "ion.h"
#include "ion_system_heap.h"

#ifndef _ION_SYSTEM_SECURE_HEAP_H
#define _ION_SYSTEM_SECURE_HEAP_H

int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);

struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
					  struct ion_buffer *buffer,
					  unsigned long order);

struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
					 struct ion_buffer *buffer);

int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
				int vmid, int order_idx, int nr_to_scan);

#endif /* _ION_SYSTEM_SECURE_HEAP_H */
|
2
drivers/staging/android/ion/msm/Makefile
Normal file
@@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += msm_ion_of.o
385
drivers/staging/android/ion/msm/msm_ion_of.c
Normal file
@@ -0,0 +1,385 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/cma.h>
|
||||
#include <linux/module.h>
|
||||
#include "../ion.h"
|
||||
|
||||
#define ION_COMPAT_STR "qcom,msm-ion"
|
||||
|
||||
static struct ion_device *idev;
|
||||
static int num_heaps;
|
||||
static struct ion_heap **heaps;
|
||||
|
||||
struct ion_heap_desc {
|
||||
unsigned int id;
|
||||
enum ion_heap_type type;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static struct ion_heap_desc ion_heap_meta[] = {
|
||||
{
|
||||
.id = ION_SYSTEM_HEAP_ID,
|
||||
.name = ION_SYSTEM_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SECURE_HEAP_ID,
|
||||
.name = ION_SECURE_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_CP_MM_HEAP_ID,
|
||||
.name = ION_MM_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_QSECOM_HEAP_ID,
|
||||
.name = ION_QSECOM_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_QSECOM_TA_HEAP_ID,
|
||||
.name = ION_QSECOM_TA_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SPSS_HEAP_ID,
|
||||
.name = ION_SPSS_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_ADSP_HEAP_ID,
|
||||
.name = ION_ADSP_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_SECURE_DISPLAY_HEAP_ID,
|
||||
.name = ION_SECURE_DISPLAY_HEAP_NAME,
|
||||
},
|
||||
{
|
||||
.id = ION_AUDIO_HEAP_ID,
|
||||
.name = ION_AUDIO_HEAP_NAME,
|
||||
}
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
|
||||
.heap_type = ION_HEAP_TYPE_##h, }
|
||||
|
||||
static struct heap_types_info {
|
||||
const char *name;
|
||||
int heap_type;
|
||||
} heap_types_info[] = {
|
||||
MAKE_HEAP_TYPE_MAPPING(SYSTEM),
|
||||
MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
|
||||
MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
|
||||
MAKE_HEAP_TYPE_MAPPING(CHUNK),
|
||||
MAKE_HEAP_TYPE_MAPPING(DMA),
|
||||
MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
|
||||
MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
|
||||
MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
|
||||
};
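For readability, MAKE_HEAP_TYPE_MAPPING() stringifies its argument and pastes it onto ION_HEAP_TYPE_, so the first entry above expands as follows (expansion shown for illustration):

	{ .name = "SYSTEM", .heap_type = ION_HEAP_TYPE_SYSTEM, },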
static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
|
||||
int *heap_type)
|
||||
{
|
||||
const char *name;
|
||||
int i, ret = -EINVAL;
|
||||
|
||||
ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
|
||||
if (ret)
|
||||
goto out;
|
||||
for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
|
||||
if (!strcmp(heap_types_info[i].name, name)) {
|
||||
*heap_type = heap_types_info[i].heap_type;
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
|
||||
name, __FILE__);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int msm_ion_populate_heap(struct device_node *node,
|
||||
struct ion_platform_heap *heap)
|
||||
{
|
||||
unsigned int i;
|
||||
int ret = -EINVAL, heap_type = -1;
|
||||
unsigned int len = ARRAY_SIZE(ion_heap_meta);
|
||||
|
||||
for (i = 0; i < len; ++i) {
|
||||
if (ion_heap_meta[i].id == heap->id) {
|
||||
heap->name = ion_heap_meta[i].name;
|
||||
ret = msm_ion_get_heap_type_from_dt_node(node,
|
||||
&heap_type);
|
||||
if (ret)
|
||||
break;
|
||||
heap->type = heap_type;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (ret)
|
||||
pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void free_pdata(const struct ion_platform_data *pdata)
|
||||
{
|
||||
kfree(pdata->heaps);
|
||||
kfree(pdata);
|
||||
}
|
||||
|
||||
static int msm_ion_get_heap_dt_data(struct device_node *node,
|
||||
struct ion_platform_heap *heap)
|
||||
{
|
||||
struct device_node *pnode;
|
||||
int ret = -EINVAL;
|
||||
|
||||
pnode = of_parse_phandle(node, "memory-region", 0);
|
||||
if (pnode) {
|
||||
const __be32 *basep;
|
||||
u64 size = 0;
|
||||
u64 base = 0;
|
||||
|
||||
basep = of_get_address(pnode, 0, &size, NULL);
|
||||
if (!basep) {
|
||||
struct device *dev = heap->priv;
|
||||
|
||||
if (dev->cma_area) {
|
||||
base = cma_get_base(dev->cma_area);
|
||||
size = cma_get_size(dev->cma_area);
|
||||
ret = 0;
|
||||
}
|
||||
} else {
|
||||
base = of_translate_address(pnode, basep);
|
||||
if (base != OF_BAD_ADDR)
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
heap->base = base;
|
||||
heap->size = size;
|
||||
}
|
||||
of_node_put(pnode);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
WARN(ret, "Failed to parse DT node for heap %s\n", heap->name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
|
||||
{
|
||||
struct ion_platform_data *pdata = 0;
|
||||
struct ion_platform_heap *heaps = NULL;
|
||||
struct device_node *node;
|
||||
struct platform_device *new_dev = NULL;
|
||||
const struct device_node *dt_node = pdev->dev.of_node;
|
||||
const __be32 *val;
|
||||
int ret = -EINVAL;
|
||||
u32 num_heaps = 0;
|
||||
int idx = 0;
|
||||
|
||||
for_each_available_child_of_node(dt_node, node)
|
||||
num_heaps++;
|
||||
|
||||
if (!num_heaps)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
|
||||
if (!pdata)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
heaps = kcalloc(num_heaps, sizeof(struct ion_platform_heap),
|
||||
GFP_KERNEL);
|
||||
if (!heaps) {
|
||||
kfree(pdata);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
pdata->heaps = heaps;
|
||||
pdata->nr = num_heaps;
|
||||
|
||||
for_each_available_child_of_node(dt_node, node) {
|
||||
new_dev = of_platform_device_create(node, NULL, &pdev->dev);
|
||||
if (!new_dev) {
|
||||
pr_err("Failed to create device %s\n", node->name);
|
||||
goto free_heaps;
|
||||
}
|
||||
of_dma_configure(&new_dev->dev, node);
|
||||
|
||||
pdata->heaps[idx].priv = &new_dev->dev;
|
||||
val = of_get_address(node, 0, NULL, NULL);
|
||||
if (!val) {
|
||||
pr_err("%s: Unable to find reg key", __func__);
|
||||
goto free_heaps;
|
||||
}
|
||||
pdata->heaps[idx].id = (u32)of_read_number(val, 1);
|
||||
|
||||
ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
|
||||
if (ret)
|
||||
goto free_heaps;
|
||||
|
||||
ret = msm_ion_get_heap_dt_data(node, &pdata->heaps[idx]);
|
||||
if (ret)
|
||||
goto free_heaps;
|
||||
|
||||
++idx;
|
||||
}
|
||||
return pdata;
|
||||
|
||||
free_heaps:
|
||||
free_pdata(pdata);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
#else
|
||||
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void free_pdata(const struct ion_platform_data *pdata)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
struct ion_heap *get_ion_heap(int heap_id)
|
||||
{
|
||||
int i;
|
||||
struct ion_heap *heap;
|
||||
|
||||
for (i = 0; i < num_heaps; i++) {
|
||||
heap = heaps[i];
|
||||
if (heap->id == heap_id)
|
||||
return heap;
|
||||
}
|
||||
|
||||
pr_err("%s: heap_id %d not found\n", __func__, heap_id);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int msm_ion_probe(struct platform_device *pdev)
|
||||
{
|
||||
static struct ion_device *new_dev;
|
||||
struct ion_platform_data *pdata;
|
||||
unsigned int pdata_needs_to_be_freed;
|
||||
int err = -1;
|
||||
int i;
|
||||
|
||||
if (pdev->dev.of_node) {
|
||||
pdata = msm_ion_parse_dt(pdev);
|
||||
if (IS_ERR(pdata))
|
||||
return PTR_ERR(pdata);
|
||||
pdata_needs_to_be_freed = 1;
|
||||
} else {
|
||||
pdata = pdev->dev.platform_data;
|
||||
pdata_needs_to_be_freed = 0;
|
||||
}
|
||||
|
||||
num_heaps = pdata->nr;
|
||||
|
||||
heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
|
||||
|
||||
if (!heaps) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
new_dev = ion_device_create();
|
||||
if (IS_ERR_OR_NULL(new_dev)) {
|
||||
/*
|
||||
* set this to the error pointer to indicate to clients
|
||||
* that Ion failed to probe.
|
||||
*/
|
||||
idev = new_dev;
|
||||
err = PTR_ERR(new_dev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* create the heaps as specified in the board file */
|
||||
for (i = 0; i < num_heaps; i++) {
|
||||
struct ion_platform_heap *heap_data = &pdata->heaps[i];
|
||||
|
||||
heaps[i] = ion_heap_create(heap_data);
|
||||
if (IS_ERR_OR_NULL(heaps[i])) {
|
||||
heaps[i] = 0;
|
||||
continue;
|
||||
} else {
|
||||
if (heap_data->size)
|
||||
pr_info("ION heap %s created at %pa with size %zx\n",
|
||||
heap_data->name,
|
||||
&heap_data->base,
|
||||
heap_data->size);
|
||||
else
|
||||
pr_info("ION heap %s created\n",
|
||||
heap_data->name);
|
||||
}
|
||||
|
||||
ion_device_add_heap(new_dev, heaps[i]);
|
||||
}
|
||||
if (pdata_needs_to_be_freed)
|
||||
free_pdata(pdata);
|
||||
|
||||
platform_set_drvdata(pdev, new_dev);
|
||||
/*
|
||||
* intentionally set this at the very end to allow probes to be deferred
|
||||
* completely until Ion is set up
|
||||
*/
|
||||
idev = new_dev;
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
kfree(heaps);
|
||||
if (pdata_needs_to_be_freed)
|
||||
free_pdata(pdata);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int msm_ion_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct ion_device *idev = platform_get_drvdata(pdev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_heaps; i++)
|
||||
ion_heap_destroy(heaps[i]);
|
||||
|
||||
ion_device_destroy(idev);
|
||||
kfree(heaps);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id msm_ion_match_table[] = {
|
||||
{.compatible = ION_COMPAT_STR},
|
||||
{},
|
||||
};
|
||||
|
||||
static struct platform_driver msm_ion_driver = {
|
||||
.probe = msm_ion_probe,
|
||||
.remove = msm_ion_remove,
|
||||
.driver = {
|
||||
.name = "ion-msm",
|
||||
.of_match_table = msm_ion_match_table,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init msm_ion_init(void)
|
||||
{
|
||||
return platform_driver_register(&msm_ion_driver);
|
||||
}
|
||||
|
||||
static void __exit msm_ion_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&msm_ion_driver);
|
||||
}
|
||||
subsys_initcall(msm_ion_init);
|
||||
module_exit(msm_ion_exit);
|
|
@ -1,7 +1,5 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* drivers/staging/android/uapi/ion.h
|
||||
*
|
||||
* Copyright (C) 2011 Google, Inc.
|
||||
*/
|
||||
|
||||
|
@ -114,7 +112,6 @@ struct ion_heap_query {
|
|||
*/
|
||||
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
|
||||
struct ion_allocation_data)
|
||||
|
||||
/**
|
||||
* DOC: ION_IOC_HEAP_QUERY - information about available heaps
|
||||
*
|
||||
|
|
117
drivers/staging/android/uapi/msm_ion.h
Normal file
@@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
#ifndef _UAPI_LINUX_MSM_ION_H
|
||||
#define _UAPI_LINUX_MSM_ION_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#define ION_BIT(nr) (1U << (nr))
|
||||
|
||||
/**
 * TARGET_ION_ABI_VERSION can be used by user-space clients to ensure, at
 * compile time, that only code using the ION APIs appropriate for this
 * kernel is included.
 */
|
||||
#define TARGET_ION_ABI_VERSION 2
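A user-space client can gate its ION code on this macro at compile time; a minimal sketch is shown below. The include path is an assumption about where this UAPI header gets installed.

#include <linux/msm_ion.h>

#if !defined(TARGET_ION_ABI_VERSION) || TARGET_ION_ABI_VERSION < 2
#error "this client requires the version 2 MSM ION ABI"
#endif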
|
||||
|
||||
enum msm_ion_heap_types {
|
||||
ION_HEAP_TYPE_MSM_START = 6,
|
||||
ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
|
||||
ION_HEAP_TYPE_SYSTEM_SECURE,
|
||||
ION_HEAP_TYPE_HYP_CMA,
|
||||
};
|
||||
|
||||
/**
|
||||
* These are the only ids that should be used for Ion heaps.
|
||||
* The ids listed are the order in which allocation will be attempted
|
||||
* if specified. Don't swap the order of heap ids unless you know what
|
||||
* you are doing!
|
||||
* Ids are spaced on purpose to allow new ids to be inserted in between (for
|
||||
* possible fallbacks)
|
||||
*/
|
||||
|
||||
enum ion_heap_ids {
|
||||
INVALID_HEAP_ID = -1,
|
||||
ION_CP_MM_HEAP_ID = 8,
|
||||
ION_SECURE_HEAP_ID = 9,
|
||||
ION_SECURE_DISPLAY_HEAP_ID = 10,
|
||||
ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
|
||||
ION_ADSP_HEAP_ID = 22,
|
||||
ION_SYSTEM_HEAP_ID = 25,
|
||||
ION_QSECOM_HEAP_ID = 27,
|
||||
ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
|
||||
};
|
||||
|
||||
/**
|
||||
* Newly added heap ids have to be #define(d) since all API changes must
|
||||
* include a new #define.
|
||||
*/
|
||||
#define ION_QSECOM_TA_HEAP_ID 19
|
||||
#define ION_AUDIO_HEAP_ID 28
|
||||
#define ION_CAMERA_HEAP_ID 20
|
||||
/**
|
||||
* Flags to be used when allocating from the secure heap for
|
||||
* content protection
|
||||
*/
|
||||
#define ION_FLAG_CP_TOUCH ION_BIT(17)
|
||||
#define ION_FLAG_CP_BITSTREAM ION_BIT(18)
|
||||
#define ION_FLAG_CP_PIXEL ION_BIT(19)
|
||||
#define ION_FLAG_CP_NON_PIXEL ION_BIT(20)
|
||||
#define ION_FLAG_CP_CAMERA ION_BIT(21)
|
||||
#define ION_FLAG_CP_HLOS ION_BIT(22)
|
||||
#define ION_FLAG_CP_SPSS_SP ION_BIT(23)
|
||||
#define ION_FLAG_CP_SPSS_SP_SHARED ION_BIT(24)
|
||||
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
|
||||
#define ION_FLAG_CP_APP ION_BIT(26)
|
||||
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
|
||||
#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
|
||||
|
||||
#define ION_FLAGS_CP_MASK 0x7FFF0000
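As an illustration of how the CP flags combine with a heap mask, a user-space allocation of protected pixel memory might look like the sketch below. ION_IOC_ALLOC and struct ion_allocation_data come from the standard ION UAPI, ION_HEAP() and ION_FLAG_SECURE are defined further down in this header, and the /dev/ion node name and error handling are assumptions.

	struct ion_allocation_data alloc = {
		.len = 1024 * 1024,
		.heap_id_mask = ION_HEAP(ION_SECURE_HEAP_ID),
		.flags = ION_FLAG_SECURE | ION_FLAG_CP_PIXEL,
	};
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd >= 0 && ioctl(ion_fd, ION_IOC_ALLOC, &alloc) == 0) {
		/* alloc.fd now holds a dma-buf fd backing the protected buffer */
	}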
|
||||
|
||||
/**
 * Flag to allow non-contiguous allocation of memory from the secure
 * heap
 */
|
||||
#define ION_FLAG_ALLOW_NON_CONTIG ION_BIT(28)
|
||||
|
||||
/**
|
||||
* Flag to use when allocating to indicate that a heap is secure.
|
||||
* Do NOT use BIT macro since it is defined in #ifdef __KERNEL__
|
||||
*/
|
||||
#define ION_FLAG_SECURE ION_BIT(ION_HEAP_ID_RESERVED)
|
||||
|
||||
/*
 * Used in conjunction with heaps that pool memory to force an allocation
 * to come from the page allocator directly instead of from the pool
 */
|
||||
#define ION_FLAG_POOL_FORCE_ALLOC ION_BIT(16)
|
||||
|
||||
/**
|
||||
* Macro should be used with ion_heap_ids defined above.
|
||||
*/
|
||||
#define ION_HEAP(bit) ION_BIT(bit)
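Since the heap ids above double as bit positions, masks for several heaps can be OR-ed together; allocation is then attempted in the id order noted earlier in this header. A short illustrative example:

	/* allow the ADSP heap with a fallback to the system heap */
	__u32 heap_id_mask = ION_HEAP(ION_ADSP_HEAP_ID) |
			     ION_HEAP(ION_SYSTEM_HEAP_ID);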
|
||||
|
||||
#define ION_IOC_MSM_MAGIC 'M'
|
||||
|
||||
struct ion_prefetch_regions {
|
||||
__u32 vmid;
|
||||
__u64 __user *sizes;
|
||||
__u32 nr_sizes;
|
||||
};
|
||||
|
||||
struct ion_prefetch_data {
|
||||
__u32 heap_id;
|
||||
__u64 len;
|
||||
struct ion_prefetch_regions __user *regions;
|
||||
__u32 nr_regions;
|
||||
};
|
||||
|
||||
#define ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \
|
||||
struct ion_prefetch_data)
|
||||
|
||||
#define ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \
|
||||
struct ion_prefetch_data)
|
||||
|
||||
#endif /* _UAPI_LINUX_MSM_ION_H */
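Putting the two structures together, a client asking the secure system heap to prefetch pooled memory for one VMID could issue the ioctl roughly as below. This is an illustrative sketch: the ion_fd handle, the choice of heap_id and the lack of error handling are assumptions, the vmid field carries a CP flag (as get_secure_vmid() expects), and the kernel rejects more than 16 sizes or regions.

	__u64 sizes[1] = { 8 * 1024 * 1024 };	/* prefetch 8 MB */
	struct ion_prefetch_regions region = {
		.vmid = ION_FLAG_CP_PIXEL,	/* VMID selected via CP flag */
		.sizes = sizes,
		.nr_sizes = 1,
	};
	struct ion_prefetch_data data = {
		.heap_id = ION_SECURE_HEAP_ID,
		.len = sizes[0],
		.regions = &region,
		.nr_regions = 1,
	};

	ioctl(ion_fd, ION_IOC_PREFETCH, &data);	/* ION_IOC_DRAIN gives it back */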
@@ -188,6 +188,68 @@ struct dma_buf_ops {
*/
|
||||
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
|
||||
|
||||
/**
|
||||
* @begin_cpu_access_umapped:
|
||||
*
|
||||
* This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
|
||||
* called with the DMA_BUF_SYNC_START and DMA_BUF_SYNC_USER_MAPPED flags
|
||||
* set. It allows the exporter to ensure that the mmap(ed) portions of
|
||||
* the buffer are available for cpu access - the exporter might need to
|
||||
* allocate or swap-in and pin the backing storage.
|
||||
* The exporter also needs to ensure that cpu access is
|
||||
* coherent for the access direction. The direction can be used by the
|
||||
* exporter to optimize the cache flushing, i.e. access with a different
|
||||
* direction (read instead of write) might return stale or even bogus
|
||||
* data (e.g. when the exporter needs to copy the data to temporary
|
||||
* storage).
|
||||
*
|
||||
* This callback is optional.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* 0 on success or a negative error code on failure. This can for
|
||||
* example fail when the backing storage can't be allocated. Can also
|
||||
* return -ERESTARTSYS or -EINTR when the call has been interrupted and
|
||||
* needs to be restarted.
|
||||
*/
|
||||
int (*begin_cpu_access_umapped)(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction);
|
||||
|
||||
/**
|
||||
* @begin_cpu_access_partial:
|
||||
*
|
||||
* This is called from dma_buf_begin_cpu_access_partial() and allows the
|
||||
* exporter to ensure that the memory specified in the range is
|
||||
* available for cpu access - the exporter might need to allocate or
|
||||
* swap-in and pin the backing storage.
|
||||
* The exporter also needs to ensure that cpu access is
|
||||
* coherent for the access direction. The direction can be used by the
|
||||
* exporter to optimize the cache flushing, i.e. access with a different
|
||||
* direction (read instead of write) might return stale or even bogus
|
||||
* data (e.g. when the exporter needs to copy the data to temporary
|
||||
* storage).
|
||||
*
|
||||
* This callback is optional.
|
||||
*
|
||||
* FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
|
||||
* from userspace (where storage shouldn't be pinned to avoid handing
|
||||
* de facto mlock rights to userspace) and for the kernel-internal
|
||||
* users of the various kmap interfaces, where the backing storage must
|
||||
* be pinned to guarantee that the atomic kmap calls can succeed. Since
|
||||
* there are no in-kernel users of the kmap interfaces yet, this isn't a
|
||||
* real problem.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* 0 on success or a negative error code on failure. This can for
|
||||
* example fail when the backing storage can't be allocated. Can also
|
||||
* return -ERESTARTSYS or -EINTR when the call has been interrupted and
|
||||
* needs to be restarted.
|
||||
*/
|
||||
int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction,
|
||||
unsigned int offset, unsigned int len);
|
||||
|
||||
/**
|
||||
* @end_cpu_access:
|
||||
*
|
||||
|
@@ -206,6 +268,51 @@ struct dma_buf_ops {
|
|||
* to be restarted.
|
||||
*/
|
||||
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
|
||||
|
||||
/**
|
||||
* @end_cpu_access_umapped:
|
||||
*
|
||||
* This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
|
||||
* called with the DMA_BUF_SYNC_END and DMA_BUF_SYNC_USER_MAPPED flags
|
||||
* set. The exporter can use this to limit cache flushing to only those parts
|
||||
* of the buffer which are mmap(ed) and to unpin any resources pinned in
|
||||
* @begin_cpu_access_umapped.
|
||||
* The result of any dma_buf kmap calls after end_cpu_access_umapped is
|
||||
* undefined.
|
||||
*
|
||||
* This callback is optional.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* 0 on success or a negative error code on failure. Can return
|
||||
* -ERESTARTSYS or -EINTR when the call has been interrupted and needs
|
||||
* to be restarted.
|
||||
*/
|
||||
int (*end_cpu_access_umapped)(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction);
|
||||
|
||||
/**
|
||||
* @end_cpu_access_partial:
|
||||
*
|
||||
* This is called from dma_buf_end_cpu_access_partial() when the
|
||||
* importer is done accessing the buffer with the CPU. The exporter can use
* this to limit cache flushing to only the range specified and to unpin any
* resources pinned in @begin_cpu_access_partial.
|
||||
* The result of any dma_buf kmap calls after end_cpu_access_partial is
|
||||
* undefined.
|
||||
*
|
||||
* This callback is optional.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* 0 on success or a negative error code on failure. Can return
|
||||
* -ERESTARTSYS or -EINTR when the call has been interrupted and needs
|
||||
* to be restarted.
|
||||
*/
|
||||
int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
|
||||
enum dma_data_direction,
|
||||
unsigned int offset, unsigned int len);
|
||||
|
||||
void *(*map_atomic)(struct dma_buf *, unsigned long);
|
||||
void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
|
||||
void *(*map)(struct dma_buf *, unsigned long);
|
||||
|
@@ -250,6 +357,20 @@ struct dma_buf_ops {
|
|||
|
||||
void *(*vmap)(struct dma_buf *);
|
||||
void (*vunmap)(struct dma_buf *, void *vaddr);
|
||||
|
||||
/**
|
||||
* @get_flags:
|
||||
*
|
||||
* This is called by dma_buf_get_flags and is used to get the buffer's
|
||||
* flags.
|
||||
* This callback is optional.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* 0 on success or a negative error code on failure. On success flags
|
||||
* will be populated with the buffer's flags.
|
||||
*/
|
||||
int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -262,6 +383,7 @@ struct dma_buf_ops {
|
|||
* @vmapping_counter: used internally to refcnt the vmaps
|
||||
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
|
||||
* @exp_name: name of the exporter; useful for debugging.
|
||||
* @name: unique name for the buffer
|
||||
* @owner: pointer to exporter module; used for refcounting when exporter is a
|
||||
* kernel module.
|
||||
* @list_node: node for dma_buf accounting and debugging.
|
||||
|
@@ -289,6 +411,7 @@ struct dma_buf {
|
|||
unsigned vmapping_counter;
|
||||
void *vmap_ptr;
|
||||
const char *exp_name;
|
||||
char *name;
|
||||
struct module *owner;
|
||||
struct list_head list_node;
|
||||
void *priv;
|
||||
|
@@ -326,6 +449,7 @@ struct dma_buf_attachment {
|
|||
struct device *dev;
|
||||
struct list_head node;
|
||||
void *priv;
|
||||
unsigned long dma_map_attrs;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -393,8 +517,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
|
|||
enum dma_data_direction);
|
||||
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
|
||||
enum dma_data_direction dir);
|
||||
int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
|
||||
enum dma_data_direction dir,
|
||||
unsigned int offset,
|
||||
unsigned int len);
|
||||
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
|
||||
enum dma_data_direction dir);
|
||||
int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
|
||||
enum dma_data_direction dir,
|
||||
unsigned int offset, unsigned int len);
|
||||
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
|
||||
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
|
||||
void *dma_buf_kmap(struct dma_buf *, unsigned long);
|
||||
|
@@ -404,4 +535,5 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
|
|||
unsigned long);
|
||||
void *dma_buf_vmap(struct dma_buf *);
|
||||
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
|
||||
int dma_buf_get_flags(struct dma_buf *dma_buf, unsigned long *flags);
|
||||
#endif /* __DMA_BUF_H__ */
|
||||
|
|
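As a quick illustration of the new importer-facing helpers declared above, a hedged kernel-side sketch follows; the function name, the 4 KiB range, and the use of DMA_BIDIRECTIONAL are assumptions for the example, not code from this patch.

/*
 * Hypothetical importer-side sketch: sync only the first 4 KiB of a dma-buf
 * around a CPU access, then query the exporter's buffer flags.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static int example_touch_header(struct dma_buf *dmabuf)
{
	unsigned long flags;
	int ret;

	ret = dma_buf_begin_cpu_access_partial(dmabuf, DMA_BIDIRECTIONAL,
					       0, SZ_4K);
	if (ret)
		return ret;

	/* ... CPU reads/writes to the first SZ_4K bytes go here ... */

	ret = dma_buf_end_cpu_access_partial(dmabuf, DMA_BIDIRECTIONAL,
					     0, SZ_4K);
	if (ret)
		return ret;

	/* Optional: ask the exporter for its buffer flags (e.g. ION flags). */
	if (!dma_buf_get_flags(dmabuf, &flags))
		pr_debug("dma-buf flags: 0x%lx\n", flags);

	return 0;
}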
11
include/linux/ion_kernel.h
Normal file
|
@@ -0,0 +1,11 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_ION_KERNEL_H__
|
||||
#define __LINUX_ION_KERNEL_H__
|
||||
|
||||
#include "../../drivers/staging/android/ion/ion_kernel.h"
|
||||
|
||||
#endif /* __LINUX_ION_KERNEL_H__ */
|
|
@@ -338,6 +338,7 @@ bool memblock_is_map_memory(phys_addr_t addr);
|
|||
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
|
||||
bool memblock_is_reserved(phys_addr_t addr);
|
||||
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
|
||||
bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
|
||||
|
||||
extern void __memblock_dump_all(void);
|
||||
|
||||
|
|
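A brief sketch (not part of this patch) of how the new memblock helper declared above might be used; the function and its carve-out semantics are illustrative assumptions.

/*
 * Hypothetical early-init sketch: reject a proposed carve-out if it
 * intersects memory that memblock is already managing.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/memblock.h>

static int __init example_check_carveout(phys_addr_t base, phys_addr_t size)
{
	if (memblock_overlaps_memory(base, size))
		return -EBUSY;	/* overlaps kernel-managed memory */

	return 0;
}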
173
include/trace/events/ion.h
Normal file
|
@@ -0,0 +1,173 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM ion
|
||||
|
||||
#if !defined(_TRACE_ION_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_ION_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
#define DEV_NAME_NONE "None"
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_dma_map_cmo_class,
|
||||
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
|
||||
__string(name, name)
|
||||
__field(bool, cached)
|
||||
__field(bool, hlos_accessible)
|
||||
__field(unsigned long, map_attrs)
|
||||
__field(enum dma_data_direction, dir)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
|
||||
__assign_str(name, name);
|
||||
__entry->cached = cached;
|
||||
__entry->hlos_accessible = hlos_accessible;
|
||||
__entry->map_attrs = map_attrs;
|
||||
__entry->dir = dir;
|
||||
),
|
||||
|
||||
TP_printk("dev=%s name=%s cached=%d access=%d map_attrs=0x%lx dir=%d",
|
||||
__get_str(dev_name),
|
||||
__get_str(name),
|
||||
__entry->cached,
|
||||
__entry->hlos_accessible,
|
||||
__entry->map_attrs,
|
||||
__entry->dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_apply,
|
||||
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_skip,
|
||||
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_apply,
|
||||
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_skip,
|
||||
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, unsigned long map_attrs,
|
||||
enum dma_data_direction dir),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_access_cmo_class,
|
||||
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
|
||||
__string(name, name)
|
||||
__field(bool, cached)
|
||||
__field(bool, hlos_accessible)
|
||||
__field(enum dma_data_direction, dir)
|
||||
__field(bool, only_mapped)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
|
||||
__assign_str(name, name);
|
||||
__entry->cached = cached;
|
||||
__entry->hlos_accessible = hlos_accessible;
|
||||
__entry->dir = dir;
|
||||
__entry->only_mapped = only_mapped;
|
||||
),
|
||||
|
||||
TP_printk("dev=%s name=%s cached=%d access=%d dir=%d, only_mapped=%d",
|
||||
__get_str(dev_name),
|
||||
__get_str(name),
|
||||
__entry->cached,
|
||||
__entry->hlos_accessible,
|
||||
__entry->dir,
|
||||
__entry->only_mapped)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_apply,
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_skip,
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_notmapped,
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_apply,
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_skip,
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_notmapped,
|
||||
TP_PROTO(const struct device *dev, const char *name,
|
||||
bool cached, bool hlos_accessible, enum dma_data_direction dir,
|
||||
bool only_mapped),
|
||||
|
||||
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
|
||||
);
|
||||
#endif /* _TRACE_ION_H */
|
||||
|
||||
#include <trace/define_trace.h>
|
||||
|
|
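For context, each DEFINE_EVENT() above generates a trace_<event>() helper; a hedged sketch of emitting one of them is below. The call site and names are assumptions, not part of this patch.

/*
 * Hypothetical call site showing how the ION map path might emit one of the
 * events above; "example-buf" stands in for the real heap or buffer name.
 */
#define CREATE_TRACE_POINTS	/* in exactly one .c file */
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <trace/events/ion.h>

static void example_trace_map(struct device *dev, bool cached,
			      bool hlos_accessible, unsigned long map_attrs,
			      enum dma_data_direction dir)
{
	trace_ion_dma_map_cmo_apply(dev, "example-buf", cached,
				    hlos_accessible, map_attrs, dir);
}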
@@ -1,4 +1,8 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM kmem
|
||||
|
||||
|
@@ -315,6 +319,538 @@ TRACE_EVENT(mm_page_alloc_extfrag,
|
|||
__entry->change_ownership)
|
||||
);
|
||||
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_alloc,
|
||||
|
||||
TP_PROTO(const char *client_name,
|
||||
const char *heap_name,
|
||||
size_t len,
|
||||
unsigned int mask,
|
||||
unsigned int flags),
|
||||
|
||||
TP_ARGS(client_name, heap_name, len, mask, flags),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, client_name, 64)
|
||||
__field(const char *, heap_name)
|
||||
__field(size_t, len)
|
||||
__field(unsigned int, mask)
|
||||
__field(unsigned int, flags)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
strlcpy(__entry->client_name, client_name, 64);
|
||||
__entry->heap_name = heap_name;
|
||||
__entry->len = len;
|
||||
__entry->mask = mask;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
||||
TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
|
||||
__entry->client_name,
|
||||
__entry->heap_name,
|
||||
__entry->len,
|
||||
__entry->mask,
|
||||
__entry->flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
|
||||
|
||||
TP_PROTO(const char *client_name,
|
||||
const char *heap_name,
|
||||
size_t len,
|
||||
unsigned int mask,
|
||||
unsigned int flags),
|
||||
|
||||
TP_ARGS(client_name, heap_name, len, mask, flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
|
||||
|
||||
TP_PROTO(const char *client_name,
|
||||
const char *heap_name,
|
||||
size_t len,
|
||||
unsigned int mask,
|
||||
unsigned int flags),
|
||||
|
||||
TP_ARGS(client_name, heap_name, len, mask, flags)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_alloc_error,
|
||||
|
||||
TP_PROTO(const char *client_name,
|
||||
const char *heap_name,
|
||||
size_t len,
|
||||
unsigned int mask,
|
||||
unsigned int flags,
|
||||
long error),
|
||||
|
||||
TP_ARGS(client_name, heap_name, len, mask, flags, error),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(const char *, client_name)
|
||||
__field(const char *, heap_name)
|
||||
__field(size_t, len)
|
||||
__field(unsigned int, mask)
|
||||
__field(unsigned int, flags)
|
||||
__field(long, error)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->client_name = client_name;
|
||||
__entry->heap_name = heap_name;
|
||||
__entry->len = len;
|
||||
__entry->mask = mask;
|
||||
__entry->flags = flags;
|
||||
__entry->error = error;
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
|
||||
__entry->client_name,
|
||||
__entry->heap_name,
|
||||
__entry->len,
|
||||
__entry->mask,
|
||||
__entry->flags,
|
||||
__entry->error)
|
||||
);
|
||||
|
||||
|
||||
DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
|
||||
|
||||
TP_PROTO(const char *client_name,
|
||||
const char *heap_name,
|
||||
size_t len,
|
||||
unsigned int mask,
|
||||
unsigned int flags,
|
||||
long error),
|
||||
|
||||
TP_ARGS(client_name, heap_name, len, mask, flags, error)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
|
||||
|
||||
TP_PROTO(const char *client_name,
|
||||
const char *heap_name,
|
||||
size_t len,
|
||||
unsigned int mask,
|
||||
unsigned int flags,
|
||||
long error),
|
||||
|
||||
TP_ARGS(client_name, heap_name, len, mask, flags, error)
|
||||
);
|
||||
|
||||
|
||||
DECLARE_EVENT_CLASS(alloc_retry,
|
||||
|
||||
TP_PROTO(int tries),
|
||||
|
||||
TP_ARGS(tries),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, tries)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->tries = tries;
|
||||
),
|
||||
|
||||
TP_printk("tries=%d",
|
||||
__entry->tries)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
|
||||
|
||||
TP_PROTO(int tries),
|
||||
|
||||
TP_ARGS(tries)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(alloc_retry, migrate_retry,
|
||||
|
||||
TP_PROTO(int tries),
|
||||
|
||||
TP_ARGS(tries)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
|
||||
|
||||
TP_PROTO(int tries),
|
||||
|
||||
TP_ARGS(tries)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(migrate_pages,
|
||||
|
||||
TP_PROTO(int mode),
|
||||
|
||||
TP_ARGS(mode),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, mode)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->mode = mode;
|
||||
),
|
||||
|
||||
TP_printk("mode=%d",
|
||||
__entry->mode)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(migrate_pages, migrate_pages_start,
|
||||
|
||||
TP_PROTO(int mode),
|
||||
|
||||
TP_ARGS(mode)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(migrate_pages, migrate_pages_end,
|
||||
|
||||
TP_PROTO(int mode),
|
||||
|
||||
TP_ARGS(mode)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_alloc_pages,
|
||||
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(gfp_t, gfp_flags)
|
||||
__field(unsigned int, order)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->gfp_flags = gfp_flags;
|
||||
__entry->order = order;
|
||||
),
|
||||
|
||||
TP_printk("gfp_flags=%s order=%d",
|
||||
show_gfp_flags(__entry->gfp_flags),
|
||||
__entry->order)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
|
||||
TP_PROTO(gfp_t gfp_flags,
|
||||
unsigned int order),
|
||||
|
||||
TP_ARGS(gfp_flags, order)
|
||||
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(smmu_map,
|
||||
|
||||
TP_PROTO(unsigned long va,
|
||||
phys_addr_t pa,
|
||||
unsigned long chunk_size,
|
||||
size_t len),
|
||||
|
||||
TP_ARGS(va, pa, chunk_size, len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long, va)
|
||||
__field(phys_addr_t, pa)
|
||||
__field(unsigned long, chunk_size)
|
||||
__field(size_t, len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->va = va;
|
||||
__entry->pa = pa;
|
||||
__entry->chunk_size = chunk_size;
|
||||
__entry->len = len;
|
||||
),
|
||||
|
||||
TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lx len=%zu",
|
||||
(void *)__entry->va,
|
||||
&__entry->pa,
|
||||
__entry->chunk_size,
|
||||
__entry->len)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(smmu_map, iommu_map_range,
|
||||
TP_PROTO(unsigned long va,
|
||||
phys_addr_t pa,
|
||||
unsigned long chunk_size,
|
||||
size_t len),
|
||||
|
||||
TP_ARGS(va, pa, chunk_size, len)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,
|
||||
|
||||
TP_PROTO(unsigned long len,
|
||||
int pool_total,
|
||||
bool is_prefetch),
|
||||
|
||||
TP_ARGS(len, pool_total, is_prefetch),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long, len)
|
||||
__field(int, pool_total)
|
||||
__field(bool, is_prefetch)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->len = len;
|
||||
__entry->pool_total = pool_total;
|
||||
__entry->is_prefetch = is_prefetch;
|
||||
),
|
||||
|
||||
TP_printk("len %lx, pool total %x is_prefetch %d",
|
||||
__entry->len,
|
||||
__entry->pool_total,
|
||||
__entry->is_prefetch)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
|
||||
TP_PROTO(unsigned long len,
|
||||
int pool_total,
|
||||
bool is_prefetch),
|
||||
|
||||
TP_ARGS(len, pool_total, is_prefetch)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
|
||||
TP_PROTO(unsigned long len,
|
||||
int pool_total,
|
||||
bool is_prefetch),
|
||||
|
||||
TP_ARGS(len, pool_total, is_prefetch)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,
|
||||
|
||||
TP_PROTO(unsigned long drained_size,
|
||||
unsigned long skipped_size),
|
||||
|
||||
TP_ARGS(drained_size, skipped_size),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long, drained_size)
|
||||
__field(unsigned long, skipped_size)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->drained_size = drained_size;
|
||||
__entry->skipped_size = skipped_size;
|
||||
),
|
||||
|
||||
TP_printk("drained size %lx, skipped size %lx",
|
||||
__entry->drained_size,
|
||||
__entry->skipped_size)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
|
||||
TP_PROTO(unsigned long drained_size,
|
||||
unsigned long skipped_size),
|
||||
|
||||
TP_ARGS(drained_size, skipped_size)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
|
||||
TP_PROTO(unsigned long drained_size,
|
||||
unsigned long skipped_size),
|
||||
|
||||
TP_ARGS(drained_size, skipped_size)
|
||||
);
|
||||
|
||||
TRACE_EVENT(ion_prefetching,
|
||||
|
||||
TP_PROTO(unsigned long len),
|
||||
|
||||
TP_ARGS(len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long, len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->len = len;
|
||||
),
|
||||
|
||||
TP_printk("prefetch size %lx",
|
||||
__entry->len)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_secure_cma_allocate,
|
||||
|
||||
TP_PROTO(const char *heap_name,
|
||||
unsigned long len,
|
||||
unsigned long flags),
|
||||
|
||||
TP_ARGS(heap_name, len, flags),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(const char *, heap_name)
|
||||
__field(unsigned long, len)
|
||||
__field(unsigned long, flags)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->heap_name = heap_name;
|
||||
__entry->len = len;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
||||
TP_printk("heap_name=%s len=%lx flags=%lx",
|
||||
__entry->heap_name,
|
||||
__entry->len,
|
||||
__entry->flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start,
|
||||
TP_PROTO(const char *heap_name,
|
||||
unsigned long len,
|
||||
unsigned long flags),
|
||||
|
||||
TP_ARGS(heap_name, len, flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end,
|
||||
TP_PROTO(const char *heap_name,
|
||||
unsigned long len,
|
||||
unsigned long flags),
|
||||
|
||||
TP_ARGS(heap_name, len, flags)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(ion_cp_secure_buffer,
|
||||
|
||||
TP_PROTO(const char *heap_name,
|
||||
unsigned long len,
|
||||
unsigned long flags),
|
||||
|
||||
TP_ARGS(heap_name, len, flags),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(const char *, heap_name)
|
||||
__field(unsigned long, len)
|
||||
__field(unsigned long, flags)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->heap_name = heap_name;
|
||||
__entry->len = len;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
||||
TP_printk("heap_name=%s len=%lx flags=%lx",
|
||||
__entry->heap_name,
|
||||
__entry->len,
|
||||
__entry->flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start,
|
||||
TP_PROTO(const char *heap_name,
|
||||
unsigned long len,
|
||||
unsigned long flags),
|
||||
|
||||
TP_ARGS(heap_name, len, flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_end,
|
||||
TP_PROTO(const char *heap_name,
|
||||
unsigned long len,
|
||||
unsigned long flags),
|
||||
|
||||
TP_ARGS(heap_name, len, flags)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range,
|
||||
|
||||
TP_PROTO(int sec_id,
|
||||
int num,
|
||||
unsigned long va,
|
||||
unsigned int pa,
|
||||
size_t len),
|
||||
|
||||
TP_ARGS(sec_id, num, va, pa, len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, sec_id)
|
||||
__field(int, num)
|
||||
__field(unsigned long, va)
|
||||
__field(unsigned int, pa)
|
||||
__field(size_t, len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->sec_id = sec_id;
|
||||
__entry->num = num;
|
||||
__entry->va = va;
|
||||
__entry->pa = pa;
|
||||
__entry->len = len;
|
||||
),
|
||||
|
||||
TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu",
|
||||
__entry->sec_id,
|
||||
__entry->num,
|
||||
__entry->va,
|
||||
__entry->pa,
|
||||
__entry->len)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start,
|
||||
|
||||
TP_PROTO(int sec_id,
|
||||
int num,
|
||||
unsigned long va,
|
||||
unsigned int pa,
|
||||
size_t len),
|
||||
|
||||
TP_ARGS(sec_id, num, va, pa, len)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end,
|
||||
|
||||
TP_PROTO(int sec_id,
|
||||
int num,
|
||||
unsigned long va,
|
||||
unsigned int pa,
|
||||
size_t len),
|
||||
|
||||
TP_ARGS(sec_id, num, va, pa, len)
|
||||
);
|
||||
#endif /* _TRACE_KMEM_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
|
|
@@ -32,8 +32,10 @@ struct dma_buf_sync {
|
|||
#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
|
||||
#define DMA_BUF_SYNC_START (0 << 2)
|
||||
#define DMA_BUF_SYNC_END (1 << 2)
|
||||
#define DMA_BUF_SYNC_USER_MAPPED (1 << 3)
|
||||
|
||||
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
|
||||
(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
|
||||
(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END | DMA_BUF_SYNC_USER_MAPPED)
|
||||
|
||||
#define DMA_BUF_BASE 'b'
|
||||
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
|
||||
|
|
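A short userspace sketch (not part of this patch) showing how the new sync flag would be combined with the existing ones; buf_fd is assumed to be a dma-buf file descriptor the caller has already mmap()ed.

/*
 * Hypothetical helper: bracket CPU access to an mmap()ed dma-buf so that an
 * exporter honouring DMA_BUF_SYNC_USER_MAPPED can limit cache maintenance
 * to the user-mapped portion of the buffer.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int sync_user_mapping(int buf_fd, int begin)
{
	struct dma_buf_sync sync;

	memset(&sync, 0, sizeof(sync));
	sync.flags = DMA_BUF_SYNC_RW | DMA_BUF_SYNC_USER_MAPPED |
		     (begin ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END);

	return ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}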
1
include/uapi/linux/ion.h
Symbolic link
|
@@ -0,0 +1 @@
|
|||
../../../drivers/staging/android/uapi/ion.h
|
1
include/uapi/linux/msm_ion.h
Symbolic link
|
@@ -0,0 +1 @@
|
|||
../../../drivers/staging/android/uapi/msm_ion.h
|
37
mm/cma.c
|
@@ -36,6 +36,7 @@
|
|||
#include <linux/highmem.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/delay.h>
|
||||
#include <trace/events/cma.h>
|
||||
|
||||
#include "cma.h"
|
||||
|
@@ -137,6 +138,10 @@ static int __init cma_activate_area(struct cma *cma)
|
|||
spin_lock_init(&cma->mem_head_lock);
|
||||
#endif
|
||||
|
||||
if (!PageHighMem(pfn_to_page(cma->base_pfn)))
|
||||
kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
|
||||
cma->count << PAGE_SHIFT);
|
||||
|
||||
return 0;
|
||||
|
||||
not_in_zone:
|
||||
|
@@ -409,6 +414,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
|
|||
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
|
||||
struct page *page = NULL;
|
||||
int ret = -ENOMEM;
|
||||
int retry_after_sleep = 0;
|
||||
int max_retries = 2;
|
||||
int available_regions = 0;
|
||||
|
||||
if (!cma || !cma->count)
|
||||
return NULL;
|
||||
|
@@ -433,9 +441,34 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
|
|||
bitmap_maxno, start, bitmap_count, mask,
|
||||
offset);
|
||||
if (bitmap_no >= bitmap_maxno) {
|
||||
mutex_unlock(&cma->lock);
|
||||
break;
|
||||
if (retry_after_sleep < max_retries) {
|
||||
start = 0;
|
||||
/*
|
||||
* Update max_retries if fewer free regions are available.
|
||||
*/
|
||||
if (available_regions < 3)
|
||||
max_retries = 5;
|
||||
available_regions = 0;
|
||||
/*
|
||||
* Page may be momentarily pinned by some other
|
||||
* process which has been scheduled out, eg.
|
||||
* in exit path, during unmap call, or process
|
||||
* fork and so cannot be freed there. Sleep
|
||||
* for 100ms and retry twice to see if it has
|
||||
* been freed later.
|
||||
*/
|
||||
mutex_unlock(&cma->lock);
|
||||
msleep(100);
|
||||
retry_after_sleep++;
|
||||
continue;
|
||||
} else {
|
||||
mutex_unlock(&cma->lock);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
available_regions++;
|
||||
bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
|
||||
/*
|
||||
* It's safe to drop the lock here. We've marked this region for
|
||||
|
|
|
@@ -1679,6 +1679,14 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
|
|||
memblock.memory.regions[idx].size) >= end;
|
||||
}
|
||||
|
||||
bool __init_memblock memblock_overlaps_memory(phys_addr_t base,
|
||||
phys_addr_t size)
|
||||
{
|
||||
memblock_cap_size(base, &size);
|
||||
|
||||
return memblock_overlaps_region(&memblock.memory, base, size);
|
||||
}
|
||||
|
||||
/**
|
||||
* memblock_is_region_reserved - check if a region intersects reserved memory
|
||||
* @base: base of region to check
|
||||
|
|