linux/drivers/dma-buf/heaps/system_heap.c
Jiri Pirko 78b30c50a7 dma-buf: heaps: system: add system_cc_shared heap for explicitly shared memory
Add a new "system_cc_shared" dma-buf heap to allow userspace to
allocate shared (decrypted) memory for confidential computing (CoCo)
VMs.

On CoCo VMs, guest memory is private by default. The hardware uses an
encryption bit in page table entries (C-bit on AMD SEV, "shared" bit on
Intel TDX) to control whether a given memory access is private or
shared. The kernel's direct map is set up as private, so pages
returned by alloc_pages() are private by default. To make this memory
usable for devices that do not support DMA to private memory (no TDISP
support), it has to be explicitly shared. Three things are needed to
properly handle shared memory for the dma-buf use case (a condensed
sketch of the resulting sequence follows the list below):

- set_memory_decrypted() on the direct map after allocation:
  Besides clearing the encryption bit in the direct map PTEs, this
  also notifies the hypervisor about the page state change. On free,
  the inverse set_memory_encrypted() must be called before returning
  pages to the allocator. If re-encryption fails, pages
  are intentionally leaked to prevent shared memory from being
  reused as private.

- pgprot_decrypted() for userspace and kernel virtual mappings:
  Any new mapping of the shared pages, be it to userspace via
  mmap or to kernel vmalloc space via vmap, creates PTEs independent
  of the direct map. These must also have the encryption bit cleared,
  otherwise accesses through them would see encrypted (garbage) data.

- DMA_ATTR_CC_SHARED for DMA mapping:
  Since the pages are already shared, the DMA API needs to be
  informed via DMA_ATTR_CC_SHARED so it can map them correctly
  as unencrypted for device access.
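
Condensed, illustrative-only sketch of the sequence that results (not a
literal excerpt from the heap implementation; error handling and the
sg_table plumbing are omitted, and variable names such as page, pages,
npages, table, dev, order and direction are placeholders):

	page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO, order);
	set_memory_decrypted((unsigned long)page_address(page),
			     1 << order);		/* direct map + hypervisor notification */

	vaddr = vmap(pages, npages, VM_MAP,
		     pgprot_decrypted(PAGE_KERNEL));	/* kernel mapping must be shared too */

	dma_map_sgtable(dev, table, direction,
			DMA_ATTR_CC_SHARED);		/* DMA API maps the pages as unencrypted */

	if (set_memory_encrypted((unsigned long)page_address(page),
				 1 << order))
		return;					/* on failure, leak rather than reuse */
	__free_pages(page, order);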

On non-CoCo VMs, the system_cc_shared heap is not registered
to prevent misuse by userspace that does not understand
the security implications of explicitly shared memory.

Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: T.J. Mercier <tjmercier@google.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260325192352.437608-3-jiri@resnulli.us
2026-04-02 07:29:33 +02:00
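
For reference, a minimal, untested userspace sketch of allocating from the new
heap through the standard dma-heap ioctl interface (the device node only
exists when the kernel registered the heap, i.e. on a CoCo guest):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 2 * 1024 * 1024,			/* buffer size in bytes */
		.fd_flags = O_RDWR | O_CLOEXEC,		/* flags for the returned dma-buf fd */
	};
	int heap_fd;
	void *p;

	/* The heap node is only present when the kernel registered it (CoCo guest). */
	heap_fd = open("/dev/dma_heap/system_cc_shared", O_RDWR | O_CLOEXEC);
	if (heap_fd < 0)
		return 1;

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0)
		return 1;

	/* CPU access goes through a shared (decrypted) mapping of the buffer. */
	p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED, alloc.fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0, alloc.len);

	munmap(p, alloc.len);
	close(alloc.fd);	/* ordinarily passed to an importing driver instead */
	close(heap_fd);
	return 0;
}

The returned alloc.fd is a regular dma-buf file descriptor; when a driver
imports and maps it, the heap's map_dma_buf callback below supplies
DMA_ATTR_CC_SHARED so the device sees the shared pages.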


// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 * Andrew F. Davis <afd@ti.com>
 */

#include <linux/cc_platform.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/pgtable.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct system_heap_priv {
	bool cc_shared;
};

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
	bool cc_shared;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
	bool cc_shared;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
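
/*
 * Flip the encryption attribute of a (possibly compound) page in the kernel
 * direct map. Besides rewriting the direct-map PTEs, set_memory_decrypted()
 * and set_memory_encrypted() also notify the hypervisor about the page state
 * change. Pages that cannot be re-encrypted on free are leaked by the callers
 * rather than handed back to the page allocator.
 */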
static int system_heap_set_page_decrypted(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	unsigned int nr_pages = 1 << compound_order(page);
	int ret;

	ret = set_memory_decrypted(addr, nr_pages);
	if (ret)
		pr_warn_ratelimited("dma-buf system heap: failed to decrypt page at %p\n",
				    page_address(page));
	return ret;
}

static int system_heap_set_page_encrypted(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	unsigned int nr_pages = 1 << compound_order(page);
	int ret;

	ret = set_memory_encrypted(addr, nr_pages);
	if (ret)
		pr_warn_ratelimited("dma-buf system heap: failed to re-encrypt page at %p, leaking memory\n",
				    page_address(page));
	return ret;
}

static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
	struct scatterlist *sg, *new_sg;
	int ret, i;

	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
	if (ret)
		return ret;

	new_sg = to->sgl;
	for_each_sgtable_sg(from, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return 0;
}

static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc_obj(*a);
	if (!a)
		return -ENOMEM;

	ret = dup_sg_table(&buffer->sg_table, &a->table);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	a->cc_shared = buffer->cc_shared;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	unsigned long attrs;
	int ret;

	attrs = a->cc_shared ? DMA_ATTR_CC_SHARED : 0;
	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long pgoff = vma->vm_pgoff;
	struct scatterlist *sg;
	pgprot_t prot;
	int i, ret;

	prot = vma->vm_page_prot;
	if (buffer->cc_shared)
		prot = pgprot_decrypted(prot);

	/* Skip whole scatterlist entries up to the requested page offset. */
	for_each_sgtable_sg(table, sg, i) {
		unsigned long n = sg->length >> PAGE_SHIFT;

		if (pgoff < n)
			break;
		pgoff -= n;
	}

	for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
		unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
		struct page *page = sg_page(sg) + pgoff;
		unsigned long size = n << PAGE_SHIFT;

		if (addr + size > vma->vm_end)
			size = vma->vm_end - addr;

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), size, prot);
		if (ret)
			return ret;

		addr += size;
		pgoff = 0;
	}
	return 0;
}
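
/* Kernel vmap()ed views of cc_shared buffers need decrypted PTEs as well. */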
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	pgprot_t prot;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	prot = PAGE_KERNEL;
	if (buffer->cc_shared)
		prot = pgprot_decrypted(prot);

	vaddr = vmap(pages, npages, VM_MAP, prot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		/*
		 * Intentionally leak pages that cannot be re-encrypted
		 * to prevent shared memory from being reused.
		 */
		if (buffer->cc_shared &&
		    system_heap_set_page_encrypted(page))
			continue;
		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;
	gfp_t flags;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		flags = order_flags[i];
		if (mem_accounting)
			flags |= __GFP_ACCOUNT;
		page = alloc_pages(flags, orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    u32 fd_flags,
					    u64 heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct system_heap_priv *priv = dma_heap_get_drvdata(heap);
	bool cc_shared = priv->cc_shared;
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc_obj(*buffer);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;
	buffer->cc_shared = cc_shared;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	if (cc_shared) {
		for_each_sgtable_sg(table, sg, i) {
			ret = system_heap_set_page_decrypted(sg_page(sg));
			if (ret)
				goto free_pages;
		}
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		/*
		 * Intentionally leak pages that cannot be re-encrypted
		 * to prevent shared memory from being reused.
		 */
		if (buffer->cc_shared &&
		    system_heap_set_page_encrypted(p))
			continue;
		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static struct system_heap_priv system_heap_priv = {
	.cc_shared = false,
};

static struct system_heap_priv system_heap_cc_shared_priv = {
	.cc_shared = true,
};
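
/*
 * The plain "system" heap is always registered. The "system_cc_shared"
 * variant is only registered when the platform actually encrypts guest
 * memory; it also relies on set_memory_*() operating on direct-mapped
 * pages, so it is not offered on HIGHMEM configurations.
 */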
static int __init system_heap_create(void)
{
	struct dma_heap_export_info exp_info;
	struct dma_heap *sys_heap;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = &system_heap_priv;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	if (IS_ENABLED(CONFIG_HIGHMEM) ||
	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return 0;

	exp_info.name = "system_cc_shared";
	exp_info.priv = &system_heap_cc_shared_priv;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);