WIP-pop-3000
diff --git a/kern/src/net/tcp.c b/kern/src/net/tcp.c
index 898eb87..0f9c4ac 100644
--- a/kern/src/net/tcp.c
+++ b/kern/src/net/tcp.c
@@ -399,6 +399,37 @@
 			if (loop++ > 10000)
 				panic("tcpackproc1");
 			tp = t->next;
+			/* this is a little odd.  overall, we wake up once per 'tick' (50ms,
+			 * whatever).  then, we decrement count.  so the timer val is in
+			 * units of 50 ms.  the timer list isn't sorted either.  once a
+			 * timer expires, it gets moved to another LL, a local one, and we
+			 * fire those alarms.
+			 *
+			 * the best anyone could do would be 50 ms granularity.
+			 *
+			 * if things are slow, you could skew later too.
+			 *
+			 * actually, your expected value is 25 ms for the first count.  so
+			 * whatever your timer.start is, your expected wait time is
+			 * start * 50 - 25 ms.
+			 * 		which is why we wait 25 ms to open up our window again.
+			 *
+			 * there might be issues with concurrency.  once the alarm is set
+			 * to done and yanked off the list, what's to stop a concurrent
+			 * setter from putting it back on the list and setting TcptimerON?
+			 * 		there are a lot of lockless peeks at timer.state
+			 *
+			 * we'd probably be better served with a kthread timer chain
+			 * 		one assumption with the timerchain stuff is that the source
+			 * 		is an IRQ, and thus IRQ context matters, etc.
+			 *
+			 * 		with a kth tchain, we're in kth context already.  and you
+			 * 		probably don't want to send another RKM for each timer.
+			 * 		unless the locking matters.
+			 *
+			 * 		interesting - even the pcpu tchains - should those be a
+			 * 		per-core kth?  does any alarm need to run from IRQ ctx?
+			 * 				maybe.
+			 */
 			if (t->state == TcptimerON) {
 				t->count--;
 				if (t->count == 0) {
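
[aside] to make the tick math above concrete, here's a minimal sketch of this
style of timer.  the names (coarse_timer, timer_tick) are made up, and unlike
tcp.c this fires in place instead of moving expired timers to a local list:

enum { TimerOFF, TimerON, TimerDONE };

struct coarse_timer {
	struct coarse_timer *next;
	int state;
	int count;		/* remaining ticks; one tick = 50 ms */
	void (*func)(void *arg);
	void *arg;
};

/* called once per 50 ms tick.  arming with count = N fires after N
 * decrements; since arming lands uniformly within a tick period, the first
 * decrement comes ~25 ms later on average, hence E[wait] = N * 50 - 25 ms. */
static void timer_tick(struct coarse_timer *list)
{
	for (struct coarse_timer *t = list; t; t = t->next) {
		if (t->state != TimerON)
			continue;
		if (--t->count == 0) {
			/* the lockless-peek hazard noted above lives here */
			t->state = TimerDONE;
			t->func(t->arg);
		}
	}
}
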
diff --git a/kern/src/ns/qio.c b/kern/src/ns/qio.c
index b9d5d9e..1d9c473 100644
--- a/kern/src/ns/qio.c
+++ b/kern/src/ns/qio.c
@@ -1607,6 +1607,7 @@
 	 * only available via padblock (to the left).  we also need some space
 	 * for pullupblock for some basic headers (like icmp) that get written
 	 * in directly */
+	// XXX might need to increase for TCP opts
 	b = block_alloc(64, mem_flags);
 	if (!b)
 		return 0;
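
back-of-envelope for that XXX, using standard header sizes rather than
anything measured from this code: a worst-case TCP/IPv4 header alone
overflows the 64 bytes.

#include <stdio.h>

int main(void)
{
	int ip_hdr = 20;	/* IPv4, no options */
	int tcp_hdr = 20;	/* fixed TCP header */
	int tcp_opts = 40;	/* max: the 4-bit data offset caps TCP at 60 */

	/* 20 + 20 + 40 = 80 > 64 */
	printf("worst-case headers: %d bytes\n", ip_hdr + tcp_hdr + tcp_opts);
	return 0;
}
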
@@ -1657,6 +1658,10 @@
 	do {
 		n = len - sofar;
 		/* This is 64K, the max amount per single block.  Still a good value? */
+		// XXX btw, Maxatomic will probably fragment memory a bit: all
+		// together, the kmalloc in build_block is a little more than 64K,
+		// so a power-of-two allocator would round it up to 128K.
+		// 		or don't use kmalloc
 		if (n > Maxatomic)
 			n = Maxatomic;
 		b = build_block(p + sofar, n, mem_flags);
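
a quick illustration of the fragmentation worry, assuming a power-of-two
allocator (the 128-byte overhead is made up):

#include <stdio.h>

static size_t p2_round_up(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	size_t payload = 64 * 1024;	/* Maxatomic */
	size_t overhead = 128;		/* struct block + slack; hypothetical */
	size_t want = payload + overhead;

	/* 65664 rounds up to 131072: nearly half the allocation is waste */
	printf("ask %zu, get %zu\n", want, p2_round_up(want));
	return 0;
}
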
diff --git a/kern/src/rendez.c b/kern/src/rendez.c
index 576eaf6..a6f324d 100644
--- a/kern/src/rendez.c
+++ b/kern/src/rendez.c
@@ -18,6 +18,12 @@
 	cv_init_irqsave(&rv->cv);
 }
 
+// XXX can we have multiple sleepers on a rendez with different functions?
+// 		if not, we can store the func (+ arg?) in the object, and the
+// 		waker can check the func too, to avoid spurious wakeups
+//
+// 		if so, we could still store each func with some lookup-elem and
+// 		have the waker run the funcs.
 void rendez_sleep(struct rendez *rv, int (*cond)(void*), void *arg)
 {
 	int8_t irq_state = 0;
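
a userspace sketch of the 'func in the object' idea, assuming a single
sleeper.  this uses pthreads instead of the kernel's cv code, and the names
(rendez_s, etc.) are made up:

#include <pthread.h>
#include <stddef.h>

struct rendez_s {
	pthread_mutex_t mtx;
	pthread_cond_t cv;
	int (*cond)(void *);
	void *arg;
};

void rendez_s_sleep(struct rendez_s *rv, int (*cond)(void *), void *arg)
{
	pthread_mutex_lock(&rv->mtx);
	/* publish the func/arg so wakers can evaluate it */
	rv->cond = cond;
	rv->arg = arg;
	while (!cond(arg))
		pthread_cond_wait(&rv->cv, &rv->mtx);
	rv->cond = NULL;
	pthread_mutex_unlock(&rv->mtx);
}

void rendez_s_wakeup(struct rendez_s *rv)
{
	pthread_mutex_lock(&rv->mtx);
	/* only signal when the sleeper's condition actually holds */
	if (rv->cond && rv->cond(rv->arg))
		pthread_cond_signal(&rv->cv);
	pthread_mutex_unlock(&rv->mtx);
}

with the func published, the waker only signals when the sleeper's condition
holds, which is the spurious-wakeup savings mentioned above.
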
diff --git a/kern/src/syscall.c b/kern/src/syscall.c
index 0659e12..60ca4ac 100644
--- a/kern/src/syscall.c
+++ b/kern/src/syscall.c
@@ -456,6 +456,7 @@
 
 /* Callable by any function while executing a syscall (or otherwise, actually).
  */
+// XXX these are fucked - anything touching user memory
 int get_errno(void)
 {
 	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
diff --git a/user/parlib/uthread.c b/user/parlib/uthread.c
index db71ae0..9eb2723 100644
--- a/user/parlib/uthread.c
+++ b/user/parlib/uthread.c
@@ -1430,5 +1430,43 @@
 
 struct uthread *uthread_self(void)
 {
+	// XXX might be pre-vc-ctx-ready?  or just proper fucked.
+	if (in_vcore_context())
+		fprintf(stderr, "OH FUCK, someone did pth_self from vc ctx!\n");
 	return current_uthread;
 }
+
+// consider a signal handler that tries to close an FD or otherwise call into
+// glibc.  that will panic, since we're in VC ctx.
+// 		could have a thread for it
+// 			though that doesn't help thread0 sched
+// 		for thread0, could mark the thread to inject a signal?
+// 			those are intraprocess signals
+//
+// naming convention for uth ops called by 2LSs vs apps vs libraries?
+// 		e.g. uthread_sleep       (app)
+// 		e.g. uthread_init        (2LS)
+// 		e.g. uthread_yield       (2LS)
+// 		e.g. uthread_has_blocked (libraries)
+// 		e.g. uthread_paused      (libraries)
+// 		e.g. uthread_create      (app)
+// 		e.g. uthread_sched_yield (app)
+//
+// 	newer stuff, client-or-library callable, has been called uth_
+// 		uth_semaphore_down
+// 		uth_blockon_evq
+//
+// 	maybe change all external stuff to uth_
+// 		diff btw libraries and apps?
+// 	and 2LS helpers to uth_2ls_, e.g. uth_2ls_yield
+// 		run_uthread, run_current_uthread, etc.
+//
+// XXX the existing uth_2ls_is_multithreaded:
+// 		the "2ls" there actually means "is the 2LS multithreaded",
+// 		not the "uth_2ls_" namespacing
+// 		it's somewhat ok - it is a 2LS-related helper
+// 	same with uthread_2ls_init.  it's 'init the 2LS'
+// 		should that be uth_2ls_2ls_init?
+// 		or is uth_2ls_ more of a 'don't fuck with this unless you're a 2LS'?
+
diff --git a/user/parlib/vcore.c b/user/parlib/vcore.c
index c431680..3898aed 100644
--- a/user/parlib/vcore.c
+++ b/user/parlib/vcore.c
@@ -92,6 +92,12 @@
 	if (vcpd->vcore_stack)
 		return 0; // reuse old stack
 
+// XXX consider adding a guard page, at least for debugging.  yeah, it fucks
+// 		with the VMR map.
+//	force_a_page_fault = ACCESS_ONCE(*(int*)(pt->stacktop - sizeof(int)));
+//			also, could change this in pth code to use syscall_async (faster):
+//	syscall_async(&uthread->local_sysc, SYS_populate_va, aux, 1);
+
 	void* stackbot = mmap(0, TRANSITION_STACK_SIZE,
 	                      PROT_READ | PROT_WRITE | PROT_EXEC,
 	                      MAP_POPULATE | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
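
a sketch of the guard-page idea: map one extra page and mprotect the bottom
of it to PROT_NONE, so overflow faults instead of silently corrupting
whatever sits below the stack.  PGSIZE and error handling are simplified,
and the second region is exactly the extra VMR the note above worries about:

#include <stddef.h>
#include <sys/mman.h>

#define PGSIZE 4096

static void *alloc_stack_with_guard(size_t stack_sz)
{
	char *base = mmap(0, stack_sz + PGSIZE, PROT_READ | PROT_WRITE,
	                  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (base == MAP_FAILED)
		return NULL;
	/* the bottom page becomes the guard */
	if (mprotect(base, PGSIZE, PROT_NONE)) {
		munmap(base, stack_sz + PGSIZE);
		return NULL;
	}
	return base + PGSIZE;	/* usable stack starts above the guard */
}
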
diff --git a/user/pthread/pthread.c b/user/pthread/pthread.c
index 241e89c..305c1e9 100644
--- a/user/pthread/pthread.c
+++ b/user/pthread/pthread.c
@@ -619,6 +619,7 @@
 	                     sizeof(struct pthread_tcb));
 	assert(!ret);
 	memset(pthread, 0, sizeof(struct pthread_tcb));	/* aggressively 0 for bugs*/
+	// XXX this default needs to be used for all task threads
 	pthread->stacksize = PTHREAD_STACK_SIZE;	/* default */
 	pthread->state = PTH_CREATED;
 	pthread->id = get_next_pid();
@@ -699,6 +700,8 @@
 	return 0;
 }
 
+// XXX
+
 int pthread_cancel(pthread_t __th)
 {
 	fprintf(stderr, "Unsupported %s!", __FUNCTION__);