dma: add user DMA arenas

These are arenas of user virtual addresses, allocated and freed by the
kernel. When we run a kernel driver for a device in the user's address
space, these arenas will provide the DMA addresses, i.e. the addresses
the device can access.

I might be able to use these for UCQs or something in the future. In
essence, they are kernel-allocated and kernel-freed userspace memory.
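
For example, a driver working on behalf of process p could use one
roughly like this (a sketch: dma_arena_alloc()/dma_arena_free() and
p->user_pages are from this patch; the device-programming step is
hypothetical):

	dma_addr_t handle;
	void *buf;

	setup_dma_arena(p);
	buf = dma_arena_alloc(p->user_pages, PGSIZE, &handle, MEM_WAIT);
	/* program the device with 'handle', a user virtual address */
	dma_arena_free(p->user_pages, buf, handle, PGSIZE);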

Arguably, we could try to use arenas to do the user VA allocation, and
then hook in the specific backing (e.g. anon memory, file cache,
preexisting page) in a second step. In that sense, do_mmap() would call
the UVA arena allocator.

For now, just having the arena be a wrapper interface around do_mmap()
is fine (arena->do_mmap, versus do_mmap->arena + other magic).
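
In that world, the call chain would invert, roughly (a sketch;
p->uva_arena is hypothetical and does not exist in this patch):

	/* hypothetically, inside do_mmap(): */
	va = arena_alloc(p->uva_arena, len, MEM_WAIT);
	/* second step: attach the backing (anon, file cache, ...) */
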
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
diff --git a/kern/include/dma.h b/kern/include/dma.h
index 6ed7db6..30436dc 100644
--- a/kern/include/dma.h
+++ b/kern/include/dma.h
@@ -39,3 +39,10 @@
void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle);
void *dma_pool_zalloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle);
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
+
+/* Proc DMA arenas: mmapped user addresses, added and removed by the kernel.
+ * Once attached to a process, the arena can be used as a driver's DMA arena
+ * for that process. */
+struct proc;
+void setup_dma_arena(struct proc *p);
+void teardown_dma_arena(struct proc *p);
diff --git a/kern/src/dma.c b/kern/src/dma.c
index f4293e4..60c1dda 100644
--- a/kern/src/dma.c
+++ b/kern/src/dma.c
@@ -47,6 +47,9 @@
#include <dma.h>
#include <pmap.h>
#include <kmalloc.h>
+#include <process.h>
+#include <mm.h>
+#include <umem.h>

/* This arena is largely a wrapper around kpages. The arena does impose some
 * overhead: btags and function pointers for every allocation. In return, we
@@ -218,3 +221,93 @@
{
	kmem_cache_free(&dp->kc, (void*)addr);
}
+
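+/* Arena interface functions: the arena layer calls user_pages_a() to import
+ * a span of user memory into the arena and user_pages_f() to release one,
+ * analogous to any other arena's afunc/ffunc pair. */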
+static void *user_pages_a(struct arena *a, size_t amt, int flags)
+{
+	struct dma_arena *da = container_of(a, struct dma_arena, arena);
+	struct proc *p = da->data;
+	void *uaddr;
+
+	uaddr = mmap(p, 0, amt, PROT_READ | PROT_WRITE,
+		     MAP_ANONYMOUS | MAP_POPULATE | MAP_PRIVATE, -1, 0);
+
+	/* TODO: think about OOM for user dma arenas, and MEM_ flags. */
+	if (uaddr == MAP_FAILED) {
+		warn("couldn't mmap %lu bytes, will probably panic", amt);
+		return NULL;
+	}
+	return uaddr;
+}
+
+static void user_pages_f(struct arena *a, void *obj, size_t amt)
+{
+	struct dma_arena *da = container_of(a, struct dma_arena, arena);
+	struct proc *p = da->data;
+
+	munmap(p, (uintptr_t)obj, amt);
+
+	/* TODO: move this to munmap */
+	extern void proc_iotlb_flush(struct proc *p);
+	proc_iotlb_flush(p);
+}
+
+static void *user_addr_to_kaddr(struct dma_arena *da, physaddr_t uaddr)
+{
+	/* We return the user address directly, so our caller needs to be
+	 * running in the user's address space, and we either need to pin the
+	 * pages or handle page faults. We could use uva2kva(), but that only
+	 * works for single pages; handling contiguous multi-page allocations
+	 * would require mmapping a KVA-contiguous chunk or other acrobatics. */
+	return (void*)uaddr;
+}
+
+/* Ensures a DMA arena exists for the proc. No-op if it already exists. */
+void setup_dma_arena(struct proc *p)
+{
+	struct dma_arena *da;
+	char name[32];
+	bool exists = false;
+
+	/* lockless peek */
+	if (READ_ONCE(p->user_pages))
+		return;
+	da = kzmalloc(sizeof(struct dma_arena), MEM_WAIT);
+	snprintf(name, ARRAY_SIZE(name), "proc-%d", p->pid);
+
+	__arena_create(&da->arena, name, PGSIZE,
+		       user_pages_a, user_pages_f, ARENA_SELF_SOURCE, 0);
+
+	da->to_cpu_addr = user_addr_to_kaddr;
+	da->data = p;
+
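+	/* Double-check under the lock; if another thread won, discard ours. */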
+	spin_lock_irqsave(&p->proc_lock);
+	if (p->user_pages)
+		exists = true;
+	else
+		WRITE_ONCE(p->user_pages, da);
+	spin_unlock_irqsave(&p->proc_lock);
+
+	if (exists) {
+		__arena_destroy(&da->arena);
+		kfree(da);
+	}
+}
+
+/* Must be called only when all users (slabs, allocs) are done and freed.
+ * Basically during __proc_free(). */
+void teardown_dma_arena(struct proc *p)
+{
+	struct dma_arena *da;
+
+	da = p->user_pages;
+	if (!da)
+		return;
+	p->user_pages = NULL;
+
+	__arena_destroy(&da->arena);
+	kfree(da);
+}
diff --git a/kern/src/ktest/kt_arena.c b/kern/src/ktest/kt_arena.c
index 651e565..ae1415f 100644
--- a/kern/src/ktest/kt_arena.c
+++ b/kern/src/ktest/kt_arena.c
@@ -3,6 +3,7 @@
#include <ktest.h>
#include <dma.h>
#include <pmap.h>
+#include <umem.h>

KTEST_SUITE("ARENA")

@@ -494,6 +495,30 @@
	return true;
}

+static bool test_user_dma(void)
+{
+	struct dma_arena *da;
+	void *o1, *o2;
+	dma_addr_t d1, d2;
+
+	setup_dma_arena(current);
+	da = current->user_pages;
+	o1 = dma_arena_alloc(da, PGSIZE * 4, &d1, MEM_WAIT);
+	o2 = dma_arena_alloc(da, PGSIZE, &d2, MEM_WAIT);
+
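+	/* For proc DMA arenas, the handle is the user VA itself, and
+	 * to_cpu_addr() is the identity, so the object equals the handle. */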
+	KT_ASSERT(is_user_rwaddr((void*)d1, PGSIZE * 4));
+	KT_ASSERT(is_user_rwaddr((void*)d2, PGSIZE));
+	KT_ASSERT(o1 == (void*)d1);
+	KT_ASSERT(o2 == (void*)d2);
+
+	dma_arena_free(da, o1, d1, PGSIZE * 4);
+	dma_arena_free(da, o2, d2, PGSIZE);
+
+	return true;
+}
+
static struct ktest ktests[] = {
	KTEST_REG(nextfit, CONFIG_KTEST_ARENA),
	KTEST_REG(bestfit, CONFIG_KTEST_ARENA),
@@ -513,6 +538,7 @@
	KTEST_REG(accounting, CONFIG_KTEST_ARENA),
	KTEST_REG(self_source, CONFIG_KTEST_ARENA),
	KTEST_REG(dma_pool, CONFIG_KTEST_ARENA),
+	KTEST_REG(user_dma, CONFIG_KTEST_ARENA),
};

static int num_ktests = sizeof(ktests) / sizeof(struct ktest);
diff --git a/kern/src/process.c b/kern/src/process.c
index 78fcf8d..28f0b9f 100644
--- a/kern/src/process.c
+++ b/kern/src/process.c
@@ -525,6 +525,7 @@
		kref_put(&p->strace->procs);
		kref_put(&p->strace->users);
	}
+	teardown_dma_arena(p);
	__vmm_struct_cleanup(p);
	p->progname[0] = 0;
	free_path(p, p->binary_path);