sched: Remove the idle core interface
It's only used by the ARSC code, which no one uses and which probably needs
a better approach anyway.
The code for the packed scheduler is buggy as hell:
- __get_any_idle_core doesn't break out of its loop once it finds an idle
  core, so it scans to the end (see the sketch below)
- __get_any_idle_core dereferences the uninitialized *spc instead of the
  loop variable c
- There's no refcounting or interaction with the idle list.
- Marking cores as allocated to UNNAMED_PROC is a problem too (prov -s
  crashes)
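For reference, here is a minimal sketch (not part of this patch) of what the
packed __get_any_idle_core() would have had to look like just to match its
own comment: use the loop variable, skip provisioned cores, and stop at the
first hit.  It only assumes the all_pcores / num_cores / UNNAMED_PROC already
in corealloc_packed.c:

int __get_any_idle_core(void)
{
        struct sched_pcore *spc;

        for (int i = 0; i < num_cores; i++) {
                spc = &all_pcores[i];
                /* Don't hand out provisioned or already-allocated cores */
                if (spc->prov_proc || spc->alloc_proc)
                        continue;
                /* Mark it so the ksched won't give this core to a process */
                spc->alloc_proc = UNNAMED_PROC;
                return spc->core_info->core_id;
        }
        return -1;
}

Even fixed up like that, it still has no refcounting and no interaction with
the idle list or the prov tracking, so just remove the interface and let the
ARSC server get a core some other way (see the #error in schedule.c below).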
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
diff --git a/kern/include/corerequest.h b/kern/include/corerequest.h
index 4ee0ea4..c637f90 100644
--- a/kern/include/corerequest.h
+++ b/kern/include/corerequest.h
@@ -40,15 +40,6 @@
void __track_core_dealloc_bulk(struct proc *p, uint32_t *pc_arr,
uint32_t nr_cores);
-/* Get/Put an idle core from our pcore list and return its core_id. Don't
- * consider the chosen core in the future when handing out cores to a
- * process. This code assumes that the scheduler that uses it holds a lock
- * for the duration of the call. This will not give out provisioned cores.
- * The gets return the coreid on success, -1 or -error on failure. */
-int __get_any_idle_core(void);
-int __get_specific_idle_core(int coreid);
-void __put_idle_core(int coreid);
-
/* One off functions to make 'pcoreid' the next core chosen by the core
* allocation algorithm (so long as no provisioned cores are still idle), and
* to sort the idle core list for debugging. This code assumes that the
diff --git a/kern/include/schedule.h b/kern/include/schedule.h
index 39b938f..eddcf8b 100644
--- a/kern/include/schedule.h
+++ b/kern/include/schedule.h
@@ -66,16 +66,6 @@
* inappropriate, since we need to know which specific core is now free. */
void avail_res_changed(int res_type, long change);
-/* Get and put idle CG cores. Getting a core removes it from the idle list, and
- * the kernel can do whatever it wants with it. All this means is that the
- * ksched won't hand out that core to a process. This will not give out
- * provisioned cores.
- *
- * The gets return the coreid on success, -1 or -error on failure. */
-int get_any_idle_core(void);
-int get_specific_idle_core(int coreid);
-void put_idle_core(int coreid);
-
/************** Provisioning / Allocating *************/
/* This section is specific to a provisioning ksched. Careful calling any of
* this from generic kernel code, since it might not be present in all kernel
diff --git a/kern/src/corealloc_fcfs.c b/kern/src/corealloc_fcfs.c
index 8df3f02..af4ab7c 100644
--- a/kern/src/corealloc_fcfs.c
+++ b/kern/src/corealloc_fcfs.c
@@ -112,68 +112,6 @@
__track_core_dealloc(p, pc_arr[i]);
}
-/* Get an idle core from our pcore list and return its core_id. Don't
- * consider the chosen core in the future when handing out cores to a
- * process. This code assumes that the scheduler that uses it holds a lock
- * for the duration of the call. This will not give out provisioned cores. */
-int __get_any_idle_core(void)
-{
- struct sched_pcore *spc;
- int ret = -1;
-
- while ((spc = TAILQ_FIRST(&idlecores))) {
- /* Don't take cores that are provisioned to a process */
- if (spc->prov_proc)
- continue;
- assert(!spc->alloc_proc);
- TAILQ_REMOVE(&idlecores, spc, alloc_next);
- ret = spc2pcoreid(spc);
- break;
- }
- return ret;
-}
-
-/* Detect if a pcore is idle or not. */
-/* TODO: if we end up using this a lot, track CG-idleness as a property of
- * the SPC instead of doing a linear search. */
-static bool __spc_is_idle(struct sched_pcore *spc)
-{
- struct sched_pcore *i;
-
- TAILQ_FOREACH(i, &idlecores, alloc_next) {
- if (spc == i)
- return TRUE;
- }
- return FALSE;
-}
-
-/* Same as __get_any_idle_core() except for a specific core id. */
-int __get_specific_idle_core(int coreid)
-{
- struct sched_pcore *spc = pcoreid2spc(coreid);
- int ret = -1;
-
- assert((coreid >= 0) && (coreid < num_cores));
- if (__spc_is_idle(pcoreid2spc(coreid)) && !spc->prov_proc) {
- assert(!spc->alloc_proc);
- TAILQ_REMOVE(&idlecores, spc, alloc_next);
- ret = coreid;
- }
- return ret;
-}
-
-/* Reinsert a core obtained via __get_any_idle_core() or
- * __get_specific_idle_core() back into the idlecore map. This code assumes
- * that the scheduler that uses it holds a lock for the duration of the call.
- * This will not give out provisioned cores. */
-void __put_idle_core(int coreid)
-{
- struct sched_pcore *spc = pcoreid2spc(coreid);
-
- assert((coreid >= 0) && (coreid < num_cores));
- TAILQ_INSERT_TAIL(&idlecores, spc, alloc_next);
-}
-
/* One off function to make 'pcoreid' the next core chosen by the core
* allocation algorithm (so long as no provisioned cores are still idle).
* This code assumes that the scheduler that uses it holds a lock for the
diff --git a/kern/src/corealloc_packed.c b/kern/src/corealloc_packed.c
index 249c89b..9490257 100644
--- a/kern/src/corealloc_packed.c
+++ b/kern/src/corealloc_packed.c
@@ -422,59 +422,6 @@
__track_core_dealloc(p, pc_arr[i]);
}
-/* Get an idle core from our pcore list and return its core_id. Don't
- * consider the chosen core in the future when handing out cores to a
- * process. This code assumes that the scheduler that uses it holds a lock
- * for the duration of the call. This will not give out provisioned cores. */
-int __get_any_idle_core(void)
-{
- struct sched_pcore *spc;
- int ret = -1;
-
- for (int i = 0; i < num_cores; i++) {
- struct sched_pcore *c = &all_pcores[i];
-
- if (spc->alloc_proc == NULL) {
- spc->alloc_proc = UNNAMED_PROC;
- ret = spc->core_info->core_id;
- }
- }
- return ret;
-}
-
-/* Detect if a pcore is idle or not. */
-static bool __spc_is_idle(struct sched_pcore *spc)
-{
- return (spc->alloc_proc == NULL);
-}
-
-/* Same as __get_any_idle_core() except for a specific core id. */
-int __get_specific_idle_core(int coreid)
-{
- struct sched_pcore *spc = pcoreid2spc(coreid);
- int ret = -1;
-
- assert((coreid >= 0) && (coreid < num_cores));
- if (__spc_is_idle(pcoreid2spc(coreid)) && !spc->prov_proc) {
- assert(!spc->alloc_proc);
- spc->alloc_proc = UNNAMED_PROC;
- ret = coreid;
- }
- return ret;
-}
-
-/* Reinsert a core obtained via __get_any_idle_core() or
- * __get_specific_idle_core() back into the idlecore map. This code assumes
- * that the scheduler that uses it holds a lock for the duration of the call.
- * This will not give out provisioned cores. */
-void __put_idle_core(int coreid)
-{
- struct sched_pcore *spc = pcoreid2spc(coreid);
-
- assert((coreid >= 0) && (coreid < num_cores));
- spc->alloc_proc = NULL;
-}
-
/* One off function to make 'pcoreid' the next core chosen by the core
* allocation algorithm (so long as no provisioned cores are still idle).
* This code assumes that the scheduler that uses it holds a lock for the
diff --git a/kern/src/schedule.c b/kern/src/schedule.c
index 7f4087d..d76055d 100644
--- a/kern/src/schedule.c
+++ b/kern/src/schedule.c
@@ -111,6 +111,9 @@
spin_unlock(&sched_lock);
#ifdef CONFIG_ARSC_SERVER
+ /* Most likely we'll have a syscall and a process that dedicates itself to
+ * running this. Or if it's a kthread, we don't need a core. */
+ #error "Find a way to get a core. Probably a syscall to run a server."
int arsc_coreid = get_any_idle_core();
assert(arsc_coreid >= 0);
send_kernel_message(arsc_coreid, arsc_server, 0, 0, 0, KMSG_ROUTINE);
@@ -495,30 +498,6 @@
printk("[kernel] ksched doesn't track any resources yet!\n");
}
-int get_any_idle_core(void)
-{
- spin_lock(&sched_lock);
- int ret = __get_any_idle_core();
- spin_unlock(&sched_lock);
- return ret;
-}
-
-int get_specific_idle_core(int coreid)
-{
- spin_lock(&sched_lock);
- int ret = __get_specific_idle_core(coreid);
- spin_unlock(&sched_lock);
- return ret;
-}
-
-/* similar to __sched_put_idle_core, but without the prov tracking */
-void put_idle_core(int coreid)
-{
- spin_lock(&sched_lock);
- __put_idle_core(coreid);
- spin_unlock(&sched_lock);
-}
-
/* This deals with a request for more cores. The amt of new cores needed is
* passed in. The ksched lock is held, but we are free to unlock if we want
* (and we must, if calling out of the ksched to anything high-level).