mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
mm: add mmap_action_simple_ioremap()
Currently drivers use vm_iomap_memory() as a simple helper function for I/O remapping memory over a range starting at a specified physical address over a specified length. In order to utilise this from mmap_prepare, separate out the core logic into __simple_ioremap_prep(), update vm_iomap_memory() to use it, and add simple_ioremap_prepare() to do the same with a VMA descriptor object. We also add MMAP_SIMPLE_IO_REMAP and relevant fields to the struct mmap_action type to permit this operation also. We use mmap_action_ioremap() to set up the actual I/O remap operation once we have checked and figured out the parameters, which makes simple_ioremap_prepare() easy to implement. We then add mmap_action_simple_ioremap() to allow drivers to make use of this mode. We update the mmap_prepare documentation to describe this mode. Finally, we update the VMA tests to reflect this change. Link: https://lkml.kernel.org/r/a08ef1c4542202684da63bb37f459d5dbbeddd91.1774045440.git.ljs@kernel.org Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Reviewed-by: Suren Baghdasaryan <surenb@google.com> Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Alexandre Torgue <alexandre.torgue@foss.st.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Bodo Stroesser <bostroesser@gmail.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Clemens Ladisch <clemens@ladisch.de> Cc: David Hildenbrand <david@kernel.org> Cc: David Howells <dhowells@redhat.com> Cc: Dexuan Cui <decui@microsoft.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haiyang Zhang <haiyangz@microsoft.com> Cc: Jan Kara <jack@suse.cz> Cc: Jann Horn <jannh@google.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: K. Y. Srinivasan <kys@microsoft.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Long Li <longli@microsoft.com> Cc: Marc Dionne <marc.dionne@auristor.com> Cc: "Martin K. 
Petersen" <martin.petersen@oracle.com> Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Miquel Raynal <miquel.raynal@bootlin.com> Cc: Pedro Falcato <pfalcato@suse.de> Cc: Richard Weinberger <richard@nod.at> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Vignesh Raghavendra <vigneshr@ti.com> Cc: Wei Liu <wei.liu@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
4995c67d4e
commit
a1b7fb40cb
@@ -153,5 +153,8 @@ pointer. These are:
|
||||
* mmap_action_ioremap_full() - Same as mmap_action_ioremap(), only remaps
|
||||
the entire mapping from ``start_pfn`` onward.
|
||||
|
||||
* mmap_action_simple_ioremap() - Sets up an I/O remap from a specified
|
||||
physical address and over a specified length.
|
||||
|
||||
**NOTE:** The ``action`` field should never normally be manipulated directly,
|
||||
rather you ought to use one of these helpers.
|
||||
|
||||
@@ -4326,6 +4326,28 @@ static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
|
||||
mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
|
||||
}
|
||||
|
||||
/**
|
||||
* mmap_action_simple_ioremap - helper for mmap_prepare hook to specify that the
|
||||
* physical range in [start_phys_addr, start_phys_addr + size) should be I/O
|
||||
* remapped.
|
||||
* @desc: The VMA descriptor for the VMA requiring remap.
|
||||
* @start_phys_addr: Start of the physical memory to be mapped.
|
||||
* @size: Size of the area to map.
|
||||
*
|
||||
* NOTE: Some drivers might want to tweak desc->page_prot for purposes of
|
||||
* write-combine or similar.
|
||||
*/
|
||||
static inline void mmap_action_simple_ioremap(struct vm_area_desc *desc,
|
||||
phys_addr_t start_phys_addr,
|
||||
unsigned long size)
|
||||
{
|
||||
struct mmap_action *action = &desc->action;
|
||||
|
||||
action->simple_ioremap.start_phys_addr = start_phys_addr;
|
||||
action->simple_ioremap.size = size;
|
||||
action->type = MMAP_SIMPLE_IO_REMAP;
|
||||
}
|
||||
|
||||
int mmap_action_prepare(struct vm_area_desc *desc);
|
||||
int mmap_action_complete(struct vm_area_struct *vma,
|
||||
struct mmap_action *action);
|
||||
|
||||
/* Action the mmap machinery should take once .mmap_prepare() has run. */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
	MMAP_SIMPLE_IO_REMAP,	/* I/O remap with guardrails. */
};
|
||||
|
||||
/*
|
||||
@@ -822,13 +823,16 @@ enum mmap_action_type {
|
||||
*/
|
||||
struct mmap_action {
|
||||
union {
|
||||
/* Remap range. */
|
||||
struct {
|
||||
unsigned long start;
|
||||
unsigned long start_pfn;
|
||||
unsigned long size;
|
||||
pgprot_t pgprot;
|
||||
} remap;
|
||||
struct {
|
||||
phys_addr_t start_phys_addr;
|
||||
unsigned long size;
|
||||
} simple_ioremap;
|
||||
};
|
||||
enum mmap_action_type type;
|
||||
|
||||
|
||||
@@ -1842,6 +1842,7 @@ int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
|
||||
int remap_pfn_range_prepare(struct vm_area_desc *desc);
|
||||
int remap_pfn_range_complete(struct vm_area_struct *vma,
|
||||
struct mmap_action *action);
|
||||
int simple_ioremap_prepare(struct vm_area_desc *desc);
|
||||
|
||||
static inline int io_remap_pfn_range_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
|
||||
85
mm/memory.c
85
mm/memory.c
@@ -3170,6 +3170,58 @@ int remap_pfn_range_complete(struct vm_area_struct *vma,
|
||||
return do_remap_pfn_range(vma, start, pfn, size, prot);
|
||||
}
|
||||
|
||||
static int __simple_ioremap_prep(unsigned long vm_len, pgoff_t vm_pgoff,
|
||||
phys_addr_t start_phys, unsigned long size,
|
||||
unsigned long *pfnp)
|
||||
{
|
||||
unsigned long pfn, pages;
|
||||
|
||||
/* Check that the physical memory area passed in looks valid */
|
||||
if (start_phys + size < start_phys)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* You *really* shouldn't map things that aren't page-aligned,
|
||||
* but we've historically allowed it because IO memory might
|
||||
* just have smaller alignment.
|
||||
*/
|
||||
size += start_phys & ~PAGE_MASK;
|
||||
pfn = start_phys >> PAGE_SHIFT;
|
||||
pages = (size + ~PAGE_MASK) >> PAGE_SHIFT;
|
||||
if (pfn + pages < pfn)
|
||||
return -EINVAL;
|
||||
|
||||
/* We start the mapping 'vm_pgoff' pages into the area */
|
||||
if (vm_pgoff > pages)
|
||||
return -EINVAL;
|
||||
pfn += vm_pgoff;
|
||||
pages -= vm_pgoff;
|
||||
|
||||
/* Can we fit all of the mapping? */
|
||||
if ((vm_len >> PAGE_SHIFT) > pages)
|
||||
return -EINVAL;
|
||||
|
||||
*pfnp = pfn;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int simple_ioremap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct mmap_action *action = &desc->action;
|
||||
const phys_addr_t start = action->simple_ioremap.start_phys_addr;
|
||||
const unsigned long size = action->simple_ioremap.size;
|
||||
unsigned long pfn;
|
||||
int err;
|
||||
|
||||
err = __simple_ioremap_prep(vma_desc_size(desc), desc->pgoff,
|
||||
start, size, &pfn);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* The I/O remap logic does the heavy lifting. */
|
||||
mmap_action_ioremap_full(desc, pfn);
|
||||
return io_remap_pfn_range_prepare(desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* vm_iomap_memory - remap memory to userspace
|
||||
* @vma: user vma to map to
|
||||
@@ -3187,32 +3239,15 @@ int remap_pfn_range_complete(struct vm_area_struct *vma,
|
||||
*/
|
||||
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	const unsigned long vm_start = vma->vm_start;
	const unsigned long vm_end = vma->vm_end;
	const unsigned long vm_len = vm_end - vm_start;
	unsigned long pfn;
	int err;

	/*
	 * Validate the physical range and translate it into the PFN at which
	 * this VMA's mapping should begin. All the guardrails (overflow
	 * checks, sub-page alignment handling, pgoff bounds, fit within the
	 * area) live in __simple_ioremap_prep().
	 */
	err = __simple_ioremap_prep(vm_len, vma->vm_pgoff, start, len, &pfn);
	if (err)
		return err;

	/* Ok, let it rip */
	return io_remap_pfn_range(vma, vm_start, pfn, vm_len, vma->vm_page_prot);
}
|
||||
|
||||
@@ -1393,6 +1393,8 @@ int mmap_action_prepare(struct vm_area_desc *desc)
|
||||
return remap_pfn_range_prepare(desc);
|
||||
case MMAP_IO_REMAP_PFN:
|
||||
return io_remap_pfn_range_prepare(desc);
|
||||
case MMAP_SIMPLE_IO_REMAP:
|
||||
return simple_ioremap_prepare(desc);
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
@@ -1421,6 +1423,7 @@ int mmap_action_complete(struct vm_area_struct *vma,
|
||||
err = remap_pfn_range_complete(vma, action);
|
||||
break;
|
||||
case MMAP_IO_REMAP_PFN:
|
||||
case MMAP_SIMPLE_IO_REMAP:
|
||||
/* Should have been delegated. */
|
||||
WARN_ON_ONCE(1);
|
||||
err = -EINVAL;
|
||||
@@ -1438,6 +1441,7 @@ int mmap_action_prepare(struct vm_area_desc *desc)
|
||||
break;
|
||||
case MMAP_REMAP_PFN:
|
||||
case MMAP_IO_REMAP_PFN:
|
||||
case MMAP_SIMPLE_IO_REMAP:
|
||||
WARN_ON_ONCE(1); /* nommu cannot handle these. */
|
||||
break;
|
||||
}
|
||||
@@ -1456,6 +1460,7 @@ int mmap_action_complete(struct vm_area_struct *vma,
|
||||
break;
|
||||
case MMAP_REMAP_PFN:
|
||||
case MMAP_IO_REMAP_PFN:
|
||||
case MMAP_SIMPLE_IO_REMAP:
|
||||
WARN_ON_ONCE(1); /* nommu cannot handle this. */
|
||||
|
||||
err = -EINVAL;
|
||||
|
||||
/* Action the mmap machinery should take once .mmap_prepare() has run. */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
	MMAP_SIMPLE_IO_REMAP,	/* I/O remap with guardrails. */
};
|
||||
|
||||
/*
|
||||
@@ -461,13 +462,16 @@ enum mmap_action_type {
|
||||
*/
|
||||
struct mmap_action {
|
||||
union {
|
||||
/* Remap range. */
|
||||
struct {
|
||||
unsigned long start;
|
||||
unsigned long start_pfn;
|
||||
unsigned long size;
|
||||
pgprot_t pgprot;
|
||||
} remap;
|
||||
struct {
|
||||
phys_addr_t start_phys_addr;
|
||||
unsigned long size;
|
||||
} simple_ioremap;
|
||||
};
|
||||
enum mmap_action_type type;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user