Skip to content

Commit

Permalink
dma buf: support SDP + multiple secure dmabuf heap
Browse files Browse the repository at this point in the history
Do not map secure heap for Secure Data Path.
Add multiple secure heap support

Signed-off-by: Olivier Masse <[email protected]>
  • Loading branch information
omasse-linaro committed May 18, 2022
1 parent fafead3 commit b52eac5
Show file tree
Hide file tree
Showing 2 changed files with 158 additions and 53 deletions.
1 change: 1 addition & 0 deletions drivers/dma-buf/heaps/Makefile
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL) += page_pool.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
Expand Down
210 changes: 157 additions & 53 deletions drivers/dma-buf/heaps/secure_heap.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@
#include "page_pool.h"
#include "deferred-free-helper.h"

/* Upper bound on "linaro,secure-heap" reserved-memory regions we register. */
#define MAX_SECURE_HEAP 2
/* Max length (including NUL) of an exported heap name. */
#define MAX_HEAP_NAME_LEN 32

struct secure_heap_buffer {
struct dma_heap *heap;
Expand All @@ -43,18 +43,29 @@ struct dma_heap_attachment {
struct device *dev;
struct sg_table *table;
struct list_head list;
bool no_map;
bool mapped;

bool uncached;
};

/*
 * Per-heap private data, handed to dma_heap_add() as drvdata and read
 * back with dma_heap_get_drvdata() in the dma-buf ops.
 */
struct secure_heap_info {
	struct gen_pool *pool;	/* carved-out allocator over the reserved region */

	bool no_map;		/* DT "no-map": buffers must not be CPU/DMA mapped */
};

/*
 * Description of one secure reserved-memory region, captured at early
 * boot by rmem_secure_heap_setup() and later turned into a dma-buf heap
 * by secure_heap_create().
 */
struct rmem_secure {
	phys_addr_t base;	/* physical start of the reserved region */
	phys_addr_t size;	/* region size in bytes */

	char name[MAX_HEAP_NAME_LEN];	/* heap name (node name up to '@') */

	bool no_map;		/* region carries the DT "no-map" property */
};

/* Regions discovered from the device tree; at most MAX_SECURE_HEAP. */
static struct rmem_secure secure_data[MAX_SECURE_HEAP] = {0};
static uint32_t secure_data_count;

static struct sg_table *dup_sg_table(struct sg_table *table)
{
Expand All @@ -75,6 +86,10 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
new_sg = new_table->sgl;
for_each_sgtable_sg(table, sg, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
new_sg->dma_address = sg->dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
new_sg->dma_length = sg->dma_length;
#endif
new_sg = sg_next(new_sg);
}

Expand All @@ -85,6 +100,7 @@ static int secure_heap_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct secure_heap_buffer *buffer = dmabuf->priv;
struct secure_heap_info *info = dma_heap_get_drvdata(buffer->heap);
struct dma_heap_attachment *a;
struct sg_table *table;

Expand All @@ -99,8 +115,10 @@ static int secure_heap_attach(struct dma_buf *dmabuf,
}

a->table = table;

a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->no_map = info->no_map;
a->mapped = false;
a->uncached = buffer->uncached;
attachment->priv = a;
Expand Down Expand Up @@ -135,14 +153,17 @@ static struct sg_table *secure_heap_map_dma_buf(struct dma_buf_attachment *attac
int attr = 0;
int ret;

if (a->uncached)
attr = DMA_ATTR_SKIP_CPU_SYNC;
if (!a->no_map) {
if (a->uncached)
attr = DMA_ATTR_SKIP_CPU_SYNC;

ret = dma_map_sgtable(attachment->dev, table, direction, attr);
if (ret)
return ERR_PTR(ret);

ret = dma_map_sgtable(attachment->dev, table, direction, attr);
if (ret)
return ERR_PTR(ret);
a->mapped = true;
}

a->mapped = true;
return table;
}

Expand All @@ -153,11 +174,13 @@ static void secure_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct dma_heap_attachment *a = attachment->priv;
int attr = 0;

if (a->uncached)
attr = DMA_ATTR_SKIP_CPU_SYNC;
if (!a->no_map) {
if (a->uncached)
attr = DMA_ATTR_SKIP_CPU_SYNC;

a->mapped = false;
dma_unmap_sgtable(attachment->dev, table, direction, attr);
a->mapped = false;
dma_unmap_sgtable(attachment->dev, table, direction, attr);
}
}

static int secure_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
Expand Down Expand Up @@ -324,19 +347,24 @@ static void secure_heap_buf_free(struct deferred_freelist_item *item,
enum df_reason reason)
{
struct secure_heap_buffer *buffer;
struct secure_heap_info *info;
struct sg_table *table;
struct scatterlist *sg;
int i;

buffer = container_of(item, struct secure_heap_buffer, deferred_free);
/* Zero the buffer pages before adding back to the pool */
if (reason == DF_NORMAL)
if (secure_heap_zero_buffer(buffer))
reason = DF_UNDER_PRESSURE; // On failure, just free
info = dma_heap_get_drvdata(buffer->heap);

if (!info->no_map) {
/* Zero the buffer pages before adding back to the pool */
if (reason == DF_NORMAL)
if (secure_heap_zero_buffer(buffer))
reason = DF_UNDER_PRESSURE; // On failure, just free
}

table = &buffer->sg_table;
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(pool,
gen_pool_free(info->pool,
sg_dma_address(sg),
sg_dma_len(sg));
}
Expand Down Expand Up @@ -374,12 +402,11 @@ static struct dma_buf *secure_heap_do_allocate(struct dma_heap *heap,
bool uncached)
{
struct secure_heap_buffer *buffer;
struct secure_heap_info *info = dma_heap_get_drvdata(heap);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
unsigned long size = roundup(len, PAGE_SIZE);
struct dma_buf *dmabuf;
struct sg_table *table;
struct list_head pages;
struct page *page;
int ret = -ENOMEM;
unsigned long phy_addr;

Expand All @@ -393,7 +420,7 @@ static struct dma_buf *secure_heap_do_allocate(struct dma_heap *heap,
buffer->len = size;
buffer->uncached = uncached;

phy_addr = gen_pool_alloc(pool, size);
phy_addr = gen_pool_alloc(info->pool, size);
if (!phy_addr)
goto free_buffer;

Expand All @@ -404,6 +431,7 @@ static struct dma_buf *secure_heap_do_allocate(struct dma_heap *heap,
sg_set_page(table->sgl,
phys_to_page(phy_addr),
size, 0);

sg_dma_address(table->sgl) = phy_addr;
sg_dma_len(table->sgl) = size;

Expand Down Expand Up @@ -445,54 +473,130 @@ static const struct dma_heap_ops secure_heap_ops = {
.allocate = secure_heap_allocate,
};

static int secure_heap_create(void)
/*
 * Register one dma-buf heap backed by the reserved region in @rmem.
 *
 * Builds a gen_pool allocator covering [base, base + size), wraps it in
 * a secure_heap_info together with the no-map flag, and exports it via
 * dma_heap_add() under @rmem->name.
 *
 * Returns 0 on success or a negative errno; every partially created
 * resource is released on failure.
 */
static int secure_heap_add(struct rmem_secure *rmem)
{
	struct dma_heap *secure_heap;
	struct dma_heap_export_info exp_info;
	struct secure_heap_info *info = NULL;
	/*
	 * Must start NULL: the first two error jumps happen before
	 * gen_pool_create(), and the error path tests this pointer.
	 * (The original left it uninitialized -- undefined behavior.)
	 */
	struct gen_pool *pool = NULL;
	int ret = -EINVAL;

	if (rmem->base == 0 || rmem->size == 0) {
		pr_err("secure_data base or size is not correct\n");
		goto error;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pr_err("dmabuf info allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/* min_alloc_order = PAGE_SHIFT + 4: allocate in 16-page granules. */
	pool = gen_pool_create(PAGE_SHIFT + 4, -1);
	if (!pool) {
		pr_err("can't create gen pool\n");
		ret = -ENOMEM;
		goto error;
	}

	if (gen_pool_add(pool, rmem->base, rmem->size, -1) < 0) {
		pr_err("failed to add memory into pool\n");
		ret = -ENOMEM;
		goto error;
	}

	info->pool = pool;
	info->no_map = rmem->no_map;

	exp_info.name = rmem->name;
	exp_info.ops = &secure_heap_ops;
	exp_info.priv = info;

	secure_heap = dma_heap_add(&exp_info);
	if (IS_ERR(secure_heap)) {
		pr_err("dmabuf secure heap allocation failed\n");
		ret = PTR_ERR(secure_heap);
		goto error;
	}

	return 0;

error:
	kfree(info);		/* kfree(NULL) is a no-op */
	if (pool)
		gen_pool_destroy(pool);

	return ret;
}

static int secure_heap_create(void)
{
int i;
int ret;

for (i = 0; i < secure_data_count; i++) {
if (secure_data[i].base == 0 || secure_data[i].size == 0)
return -EINVAL;

ret = secure_heap_add(&secure_data[i]);
if (ret)
return -EINVAL;
}
return 0;
}

/* reserved_mem_ops.device_init: attach the region to @dev as drvdata. */
static int rmem_secure_heap_device_init(struct reserved_mem *rmem,
					struct device *dev)
{
	dev_set_drvdata(dev, rmem);
	return 0;
}

/* reserved_mem_ops.device_release: drop the drvdata link set at init. */
static void rmem_secure_heap_device_release(struct reserved_mem *rmem,
					    struct device *dev)
{
	dev_set_drvdata(dev, NULL);
}

/* Ops installed on each matched reserved-memory node (see setup below). */
static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init = rmem_secure_heap_device_init,
	.device_release = rmem_secure_heap_device_release,
};

/*
 * Early-boot hook for every "linaro,secure-heap" reserved-memory node.
 *
 * Records base/size/no-map and derives the heap name from the node name
 * truncated at the unit address ('@'). Returns 0, or -EINVAL once more
 * than MAX_SECURE_HEAP regions have been seen.
 */
static int __init rmem_secure_heap_setup(struct reserved_mem *rmem)
{
	struct rmem_secure *data;
	size_t name_len = 0;
	const char *s;	/* const-correct: rmem->name is const */

	if (secure_data_count >= MAX_SECURE_HEAP)
		return -EINVAL;

	data = &secure_data[secure_data_count];
	data->base = rmem->base;
	data->size = rmem->size;
	data->no_map =
		(of_get_flat_dt_prop(rmem->fdt_node, "no-map", NULL) != NULL);

	/*
	 * Cap at MAX_HEAP_NAME_LEN - 1 so the copy below always leaves
	 * room for the terminator (the original strncpy could fill all
	 * MAX_HEAP_NAME_LEN bytes and leave name[] unterminated).
	 */
	for (s = rmem->name; name_len < MAX_HEAP_NAME_LEN - 1; s++) {
		if (*s == '@' || *s == '\0')
			break;
		name_len++;
	}

	memcpy(data->name, rmem->name, name_len);
	data->name[name_len] = '\0';

	rmem->ops = &rmem_dma_ops;
	/* %lu: the size operand is unsigned long (was %ld). */
	pr_info("Reserved memory: DMA buf secure pool %s at %pa, size %lu MiB\n",
		data->name, &rmem->base, (unsigned long)rmem->size / SZ_1M);

	secure_data_count++;
	return 0;
}

RESERVEDMEM_OF_DECLARE(secure_heap, "linaro,secure-heap", rmem_secure_heap_setup);

module_init(secure_heap_create);
MODULE_LICENSE("GPL v2");

0 comments on commit b52eac5

Please sign in to comment.