author	Samuel Thibault <samuel.thibault@ens-lyon.org>	2020-07-09 22:28:58 +0200
committer	Samuel Thibault <samuel.thibault@ens-lyon.org>	2020-07-09 22:43:01 +0200
commit	2dbf108457d0a0057cc63d5b3b89fd4da48d2a72 (patch)
tree	7de245e353f736224b07609dfa81b637c1fb2cf4 /vm
parent	d7d9a1e104c21183506935f26d59bb00161bd797 (diff)
Add vm_allocate_contiguous RPC
This allows privileged userland drivers to allocate buffers, e.g. for DMA,
which thus need to be physically contiguous, and to get their physical
address.

Initial work by Zheng Da, reworked by Richard Braun, Damien Zammit, and
myself.

* doc/mach.texi (vm_allocate_contiguous): New RPC.
* i386/include/mach/i386/machine_types.defs (rpc_phys_addr_t): New type.
* i386/include/mach/i386/vm_types.h [!MACH_KERNEL] (phys_addr_t): Set type
to 64 bits.
(rpc_phys_addr_t): New type, always 64 bits.
* include/mach/gnumach.defs (vm_allocate_contiguous): New RPC.
* vm/vm_user.c (vm_allocate_contiguous): New function.
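
For illustration, a privileged driver could invoke the new RPC through the
MIG-generated user stub from include/mach/gnumach.defs. This is a minimal
sketch, not part of the commit: alloc_dma_buffer is a hypothetical helper,
host_priv must be the privileged host port (on the Hurd typically obtained
via get_privileged_ports()), and the pmax of 4 GiB is just an example
constraint for a 32-bit DMA device:

	#include <mach.h>

	/* Hypothetical helper: allocate a wired, physically contiguous
	   buffer and return both its virtual and physical addresses. */
	static kern_return_t
	alloc_dma_buffer(mach_port_t host_priv, vm_size_t size,
			 vm_address_t *vaddr, rpc_phys_addr_t *paddr)
	{
		/* pmin == 0 and palign == 0 (i.e. PAGE_SIZE) are the only
		   values the kernel accepts so far; see the FIXMEs in
		   vm_user.c below. */
		return vm_allocate_contiguous(host_priv, mach_task_self(),
					      vaddr, paddr, size,
					      0, 0xffffffffULL, 0);
	}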
Diffstat (limited to 'vm')
-rw-r--r--	vm/vm_user.c	126
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/vm/vm_user.c b/vm/vm_user.c
index f6fb1a41..4d5728c8 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -533,3 +533,129 @@ kern_return_t vm_msync(
return vm_map_msync(map, (vm_offset_t) address, size, sync_flags);
}
+
+/*
+ * vm_allocate_contiguous allocates "zero fill" physical memory and maps
+ * it into the specified map.
+ */
+/* TODO: respect physical alignment (palign)
+ * and minimum physical address (pmin)
+ */
+kern_return_t vm_allocate_contiguous(
+ host_t host_priv,
+ vm_map_t map,
+ vm_address_t *result_vaddr,
+ rpc_phys_addr_t *result_paddr,
+ vm_size_t size,
+ rpc_phys_addr_t pmin,
+ rpc_phys_addr_t pmax,
+ rpc_phys_addr_t palign)
+{
+ vm_size_t alloc_size;
+ unsigned int npages;
+ unsigned int i;
+ unsigned int order;
+ unsigned int selector;
+ vm_page_t pages;
+ vm_object_t object;
+ kern_return_t kr;
+ vm_address_t vaddr;
+
+ if (host_priv == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ /* FIXME */
+ if (pmin != 0)
+ return KERN_INVALID_ARGUMENT;
+
+ if (palign == 0)
+ palign = PAGE_SIZE;
+
+ /* FIXME */
+ if (palign != PAGE_SIZE)
+ return KERN_INVALID_ARGUMENT;
+
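+	/*
+	 * Translate the pmax constraint into the page allocator's
+	 * coarse segment selectors, from most to least constrained.
+	 */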
+ selector = VM_PAGE_SEL_DMA;
+ if (pmax > VM_PAGE_DMA_LIMIT)
+#ifdef VM_PAGE_DMA32_LIMIT
+ selector = VM_PAGE_SEL_DMA32;
+ if (pmax > VM_PAGE_DMA32_LIMIT)
+#endif
+ selector = VM_PAGE_SEL_DIRECTMAP;
+ if (pmax > VM_PAGE_DIRECTMAP_LIMIT)
+ selector = VM_PAGE_SEL_HIGHMEM;
+
+ size = vm_page_round(size);
+
+ if (size == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ object = vm_object_allocate(size);
+
+ if (object == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * XXX The page allocator returns blocks with a power-of-two size.
+ * The requested size may not be a power-of-two, requiring some
+ * work to release back the pages that aren't needed.
+ */
+ order = vm_page_order(size);
+ alloc_size = (1 << (order + PAGE_SHIFT));
+ npages = vm_page_atop(alloc_size);
+
+ pages = vm_page_grab_contig(alloc_size, selector);
+
+ if (pages == NULL) {
+ vm_object_deallocate(object);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ vm_object_lock(object);
+ vm_page_lock_queues();
+
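+	/*
+	 * Insert the pages actually needed into the object and wire
+	 * them so they cannot be paged out.
+	 */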
+ for (i = 0; i < vm_page_atop(size); i++) {
+ /*
+ * XXX We can safely handle contiguous pages as an array,
+ * but this relies on knowing the implementation of the
+ * page allocator.
+ */
+ pages[i].busy = FALSE;
+ vm_page_insert(&pages[i], object, vm_page_ptoa(i));
+ vm_page_wire(&pages[i]);
+ }
+
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
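+	/* Return the surplus pages of the power-of-two allocation. */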
+ for (i = vm_page_atop(size); i < npages; i++) {
+ vm_page_release(&pages[i], FALSE, FALSE);
+ }
+
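+	/* Let the kernel choose the virtual address (anywhere mapping). */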
+ vaddr = 0;
+ kr = vm_map_enter(map, &vaddr, size, 0, TRUE, object, 0, FALSE,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
+ vm_object_deallocate(object);
+ return kr;
+ }
+
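+	/* Wire the mapping down so user accesses never fault. */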
+ kr = vm_map_pageable(map, vaddr, vaddr + size,
+ VM_PROT_READ | VM_PROT_WRITE,
+ TRUE, TRUE);
+
+ if (kr != KERN_SUCCESS) {
+ vm_map_remove(map, vaddr, vaddr + size);
+ return kr;
+ }
+
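+	/* The block is physically contiguous: the first page gives the base. */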
+ *result_vaddr = vaddr;
+ *result_paddr = pages->phys_addr;
+
+ return KERN_SUCCESS;
+}