Track the VMRs that map a page_map
Needed so we can remove pages from a PM, or otherwise see which processes
are using which parts of it.
diff --git a/kern/include/mm.h b/kern/include/mm.h
index cbe5ba6..e0133fe 100644
--- a/kern/include/mm.h
+++ b/kern/include/mm.h
@@ -24,6 +24,7 @@
* VMRs. */
struct vm_region {
TAILQ_ENTRY(vm_region) vm_link;
+ TAILQ_ENTRY(vm_region) vm_pm_link;
struct proc *vm_proc; /* owning process, for now */
uintptr_t vm_base;
uintptr_t vm_end;
@@ -34,8 +35,6 @@
};
TAILQ_HEAD(vmr_tailq, vm_region); /* Declares 'struct vmr_tailq' */
-#include <process.h> /* preprocessor games */
-
/* VM Region Management Functions. For now, these just maintain themselves -
* anything related to mapping needs to be done by the caller. */
void vmr_init(void);
diff --git a/kern/include/pagemap.h b/kern/include/pagemap.h
index bbbed5e..0aab4e9 100644
--- a/kern/include/pagemap.h
+++ b/kern/include/pagemap.h
@@ -12,6 +12,7 @@
#include <radix.h>
#include <atomic.h>
+#include <mm.h>
/* Need to be careful, due to some ghetto circular references */
struct page;
@@ -33,6 +34,8 @@
struct page_map_operations *pm_op;
unsigned int pm_flags;
/*... and private lists, backing block dev info, other mappings, etc. */
+ spinlock_t pm_lock;
+ struct vmr_tailq pm_vmrs;
};
/* Operations performed on a page_map. These are usually FS specific, which
@@ -57,5 +60,8 @@
void pm_init(struct page_map *pm, struct page_map_operations *op, void *host);
int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp);
void pm_put_page(struct page *page);
+void pm_add_vmr(struct page_map *pm, struct vm_region *vmr);
+void pm_remove_vmr(struct page_map *pm, struct vm_region *vmr);
+void print_page_map_info(struct page_map *pm);
#endif /* ROS_KERN_PAGEMAP_H */
diff --git a/kern/src/mm.c b/kern/src/mm.c
index 29aedfc..bc231df 100644
--- a/kern/src/mm.c
+++ b/kern/src/mm.c
@@ -25,10 +25,15 @@
#include <vfs.h>
#include <smp.h>
-static int __vmr_free_pgs(struct proc *p, pte_t *pte, void *va, void *arg);
-
struct kmem_cache *vmr_kcache;
+static int __vmr_free_pgs(struct proc *p, pte_t *pte, void *va, void *arg);
+/* minor helper, will ease the file->chan transition */
+static struct page_map *file2pm(struct file *file)
+{
+ return file->f_mapping;
+}
+
void vmr_init(void)
{
vmr_kcache = kmem_cache_create("vm_regions", sizeof(struct vm_region),
@@ -118,6 +123,7 @@
new_vmr->vm_file = old_vmr->vm_file;
new_vmr->vm_foff = old_vmr->vm_foff +
old_vmr->vm_end - old_vmr->vm_base;
+ pm_add_vmr(file2pm(old_vmr->vm_file), new_vmr);
} else {
new_vmr->vm_file = 0;
new_vmr->vm_foff = 0;
@@ -190,8 +196,10 @@
* out the page table entries. */
void destroy_vmr(struct vm_region *vmr)
{
- if (vmr->vm_file)
+ if (vmr->vm_file) {
+ pm_remove_vmr(file2pm(vmr->vm_file), vmr);
kref_put(&vmr->vm_file->f_kref);
+ }
TAILQ_REMOVE(&vmr->vm_proc->vm_regions, vmr, vm_link);
kmem_cache_free(vmr_kcache, vmr);
}
@@ -322,10 +330,12 @@
vmr->vm_end = vm_i->vm_end;
vmr->vm_prot = vm_i->vm_prot;
vmr->vm_flags = vm_i->vm_flags;
- if (vm_i->vm_file)
- kref_get(&vm_i->vm_file->f_kref, 1);
vmr->vm_file = vm_i->vm_file;
vmr->vm_foff = vm_i->vm_foff;
+ if (vm_i->vm_file) {
+ kref_get(&vm_i->vm_file->f_kref, 1);
+ pm_add_vmr(file2pm(vm_i->vm_file), vmr);
+ }
if (!vmr->vm_file || vmr->vm_flags & MAP_PRIVATE) {
assert(!(vmr->vm_flags & MAP_SHARED));
/* Copy over the memory from one VMR to the other */
@@ -499,6 +509,7 @@
vmr->vm_flags = flags;
if (file) {
if (!check_file_perms(vmr, file, prot)) {
+ assert(!vmr->vm_file);
destroy_vmr(vmr);
set_errno(EACCES);
return MAP_FAILED;
@@ -514,17 +525,20 @@
* immediately mprotect it to PROT_NONE. */
flags &= ~MAP_POPULATE;
}
+ /* Prep the FS to make sure it can mmap the file. Slightly weird
+ * semantics: if we fail and had munmapped the space, they will have a
+ * hole in their VM now. */
+ if (file->f_op->mmap(file, vmr)) {
+ assert(!vmr->vm_file);
+ destroy_vmr(vmr);
+ set_errno(EACCES); /* not quite */
+ return MAP_FAILED;
+ }
kref_get(&file->f_kref, 1);
+ pm_add_vmr(file2pm(file), vmr);
}
vmr->vm_file = file;
vmr->vm_foff = offset;
- /* Prep the FS to make sure it can mmap the file. Slightly weird semantics:
- * they will have a hole in their VM now. */
- if (file && file->f_op->mmap(file, vmr)) {
- destroy_vmr(vmr);
- set_errno(EACCES); /* not quite */
- return MAP_FAILED;
- }
addr = vmr->vm_base; /* so we know which pages to populate later */
vmr = merge_me(vmr); /* attempts to merge with neighbors */
/* Fault in pages now if MAP_POPULATE. We want to populate the region
diff --git a/kern/src/pagemap.c b/kern/src/pagemap.c
index c984911..62e43bf 100644
--- a/kern/src/pagemap.c
+++ b/kern/src/pagemap.c
@@ -12,6 +12,23 @@
#include <assert.h>
#include <stdio.h>
+void pm_add_vmr(struct page_map *pm, struct vm_region *vmr)
+{
+	/* The PM's lock protects this reverse mapping (PM back to its VMRs).
+	 * Whoever holds the lock during removal delays concurrent munmaps and
+	 * keeps the VMR connected until they are done. */
+ spin_lock(&pm->pm_lock);
+ TAILQ_INSERT_TAIL(&pm->pm_vmrs, vmr, vm_pm_link);
+ spin_unlock(&pm->pm_lock);
+}
+
+void pm_remove_vmr(struct page_map *pm, struct vm_region *vmr)
+{
+ spin_lock(&pm->pm_lock);
+ TAILQ_REMOVE(&pm->pm_vmrs, vmr, vm_pm_link);
+ spin_unlock(&pm->pm_lock);
+}
+
/* Initializes a PM. Host should be an *inode or a *bdev (doesn't matter). The
* reference this stores is uncounted. */
void pm_init(struct page_map *pm, struct page_map_operations *op, void *host)
@@ -22,6 +39,8 @@
pm->pm_num_pages = 0; /* no pages in a new pm */
pm->pm_op = op;
pm->pm_flags = 0;
+ spinlock_init(&pm->pm_lock);
+ TAILQ_INIT(&pm->pm_vmrs);
}
/* Looks up the index'th page in the page map, returning an incref'd reference,
@@ -132,3 +151,17 @@
assert(atomic_read(&page->pg_flags) & PG_UPTODATE);
return 0;
}
+
+void print_page_map_info(struct page_map *pm)
+{
+ struct vm_region *vmr_i;
+ printk("Page Map %p\n", pm);
+ printk("\tNum pages: %lu\n", pm->pm_num_pages);
+ spin_lock(&pm->pm_lock);
+ TAILQ_FOREACH(vmr_i, &pm->pm_vmrs, vm_pm_link) {
+ printk("\tVMR proc %d: (%p - %p): 0x%08x, 0x%08x, %p, %p\n",
+ vmr_i->vm_proc->pid, vmr_i->vm_base, vmr_i->vm_end,
+ vmr_i->vm_prot, vmr_i->vm_flags, vmr_i->vm_file, vmr_i->vm_foff);
+ }
+ spin_unlock(&pm->pm_lock);
+}