vmm: Collect and report scheduler stats

You can access these stats with "notify 9", e.g.

$ notify PID 9

Pass 1 as the arg to reset the counters.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
diff --git a/user/vmm/include/vmm/sched.h b/user/vmm/include/vmm/sched.h
index cf022b9..c5843e7 100644
--- a/user/vmm/include/vmm/sched.h
+++ b/user/vmm/include/vmm/sched.h
@@ -33,6 +33,7 @@
 	bool						halt_exit;
 	uth_mutex_t					*halt_mtx;
 	uth_cond_var_t				*halt_cv;
+	unsigned long				nr_vmexits;
 	// TODO: work out a real ops strategy.
 	bool (*vmcall)(struct guest_thread *gth, struct vm_trapframe *);
 };
@@ -62,6 +63,10 @@
 	int							type;
 	TAILQ_ENTRY(vmm_thread)		tq_next;
 	struct virtual_machine		*vm;
+	/* Sched stats */
+	int							prev_vcoreid;
+	unsigned long				nr_runs;
+	unsigned long				nr_resched;
 };
 
 TAILQ_HEAD(vmm_thread_tq, vmm_thread);
diff --git a/user/vmm/sched.c b/user/vmm/sched.c
index dd93702..de1a75b 100644
--- a/user/vmm/sched.c
+++ b/user/vmm/sched.c
@@ -218,6 +218,15 @@
 	return FALSE;
 }
 
+static void stats_run_vth(struct vmm_thread *vth)
+{
+	vth->nr_runs++;
+	if (vth->prev_vcoreid != vcore_id()) {
+		vth->prev_vcoreid = vcore_id();
+		vth->nr_resched++;
+	}
+}
+
 static void __attribute__((noreturn)) vmm_sched_entry(void)
 {
 	struct vmm_thread *vth;
@@ -228,14 +237,17 @@
 		/* slightly less than ideal: we grab the queue lock twice */
 		yield_current_uth();
 	}
-	if (current_uthread)
+	if (current_uthread) {
+		stats_run_vth((struct vmm_thread*)current_uthread);
 		run_current_uthread();
+	}
 	if (have_enough)
 		vth = pick_a_thread_plenty();
 	else
 		vth = pick_a_thread_degraded();
 	if (!vth)
 		vcore_yield_or_restart();
+	stats_run_vth(vth);
 	run_uthread((struct uthread*)vth);
 }
 
@@ -322,6 +334,7 @@
 	/* We just immediately run our buddy.  The ctlr and the guest are accounted
 	 * together ("pass the token" back and forth). */
 	current_uthread = NULL;
+	stats_run_vth((struct vmm_thread*)cth->buddy);
 	run_uthread((struct uthread*)cth->buddy);
 	assert(0);
 }
@@ -359,6 +372,7 @@
 	struct guest_thread *gth = (struct guest_thread*)uth;
 	struct ctlr_thread *cth = gth->buddy;
 
+	gth->nr_vmexits++;
 	/* The ctlr starts frm the top every time we get a new fault. */
 	cth->uthread.flags |= UTHREAD_SAVED;
 	init_user_ctx(&cth->uthread.u_ctx, (uintptr_t)&__ctlr_entry,
@@ -366,6 +380,7 @@
 	/* We just immediately run our buddy.  The ctlr and the guest are accounted
 	 * together ("pass the token" back and forth). */
 	current_uthread = NULL;
+	stats_run_vth((struct vmm_thread*)cth);
 	run_uthread((struct uthread*)cth);
 	assert(0);
 }
@@ -462,6 +477,38 @@
 	return gth;
 }
 
+static void ev_handle_diag(struct event_msg *ev_msg, unsigned int ev_type,
+                           void *data)
+{
+	struct virtual_machine *vm = current_vm;
+	struct guest_thread *gth;
+	struct ctlr_thread *cth;
+	bool reset = FALSE;
+
+	if (ev_msg && (ev_msg->ev_arg1 == 1))
+		reset = TRUE;
+
+	fprintf(stderr, "\nSCHED stats:\n---------------\n");
+	for (int i = 0; i < vm->nr_gpcs; i++) {
+		gth = vm->gths[i];
+		cth = gth->buddy;
+		fprintf(stderr, "\tGPC %2d: %lu resched, %lu gth runs, %lu ctl runs, %lu user-handled vmexits\n",
+				i,
+		        ((struct vmm_thread*)gth)->nr_resched,
+		        ((struct vmm_thread*)gth)->nr_runs,
+		        ((struct vmm_thread*)cth)->nr_runs,
+		        gth->nr_vmexits);
+		if (reset) {
+			((struct vmm_thread*)gth)->nr_resched = 0;
+			((struct vmm_thread*)gth)->nr_runs = 0;
+			((struct vmm_thread*)cth)->nr_runs = 0;
+			gth->nr_vmexits = 0;
+		}
+	}
+	fprintf(stderr, "\n\tNr unblocked gpc %lu, Nr unblocked tasks %lu\n",
+	        atomic_read(&nr_unblk_guests), atomic_read(&nr_unblk_tasks));
+}
+
 int vmm_init(struct virtual_machine *vm, int flags)
 {
 	struct guest_thread **gths;
@@ -485,6 +532,7 @@
 	}
 	vm->gths = gths;
 	uthread_mcp_init();
+	register_ev_handler(EV_FREE_APPLE_PIE, ev_handle_diag, NULL);
 	return 0;
 }