Support atomic printks
printk() was atomic at the granularity of a line - most of the time. It
wasn't during kernel faults, which led to interleaved output and
inscrutable bug reports. At the time, I skipped the lock entirely to
avoid deadlocking. Instead, we can use a recursive lock, though I'm not
normally a fan of those.
Additionally, other callers can use print_lock() to atomically print
multiple lines. The classic examples are panic() and warn(). panic() has a
few issues of its own - notably that it may never unwind its stack - so it
gets print_unlock_force().
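For example, bracketing with print_lock()/print_unlock() makes a
multi-line report come out as one block (foo and bar here are made-up
placeholders, not from this patch):

	print_lock();
	printk("core %d: dumping state\n", core_id_early());
	printk("\tfoo: %d\n", foo);
	printk("\tbar: %d\n", bar);
	print_unlock();

Since the lock is recursive, any printk() inside the critical section -
even one from a nested fault handler - just takes the lock again instead
of deadlocking.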
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
diff --git a/kern/include/stdio.h b/kern/include/stdio.h
index ce0f048..fd9cbaf 100644
--- a/kern/include/stdio.h
+++ b/kern/include/stdio.h
@@ -35,6 +35,9 @@
void vprintfmt(void (*putch)(int, void**), void **putdat, const char *fmt, va_list);
// lib/printf.c
+void print_lock(void);
+void print_unlock(void);
+void print_unlock_force(void);
int ( cprintf)(const char *fmt, ...);
int vcprintf(const char *fmt, va_list);
diff --git a/kern/src/init.c b/kern/src/init.c
index 28b8e06..60c64a7 100644
--- a/kern/src/init.c
+++ b/kern/src/init.c
@@ -229,11 +229,12 @@
#endif
/* Multiple cores can panic concurrently. We could also panic recursively,
- * which would deadlock. We also only want to automatically backtrace the first
+ * which could deadlock. We also only want to automatically backtrace the first
* time through, since BTs are often the source of panics. Finally, we want to
* know when the other panicking cores are done (or likely to be done) before
- * entering the monitor. */
-static spinlock_t panic_lock = SPINLOCK_INITIALIZER_IRQSAVE;
+ * entering the monitor.
+ *
+ * We'll use the print_lock(), which is recursive, to protect panic_printing. */
static bool panic_printing;
static DEFINE_PERCPU(int, panic_depth);
@@ -244,17 +245,11 @@
void _panic(struct hw_trapframe *hw_tf, const char *file, int line,
const char *fmt, ...)
{
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id_early()];
va_list ap;
- struct per_cpu_info *pcpui;
- /* We're panicing, possibly in a place that can't handle the lock checker */
- pcpui = &per_cpu_info[core_id_early()];
- pcpui->__lock_checking_enabled--;
-
- if (!PERCPU_VAR(panic_depth)) {
- spin_lock_irqsave(&panic_lock);
- panic_printing = true;
- }
+ print_lock();
+ panic_printing = true;
PERCPU_VAR(panic_depth)++;
va_start(ap, fmt);
@@ -278,11 +273,11 @@
}
printk("\n");
- /* If we're here, we panicked and currently hold the lock. We might have
- * panicked recursively. We must unlock unconditionally, since the initial
- * panic (which grabbed the lock) will never run again. */
+ /* If we're here, we panicked and currently hold the print_lock. We might
+ * have panicked recursively. We must unlock unconditionally, since the
+ * initial panic (which grabbed the lock) will never run again. */
panic_printing = false;
- spin_unlock_irqsave(&panic_lock);
+ print_unlock_force();
/* And we have to clear the depth, so that we lock again next time in.
* Otherwise, we'd be unlocking without locking (which is another panic). */
PERCPU_VAR(panic_depth) = 0;
@@ -293,34 +288,39 @@
udelay(500000);
cmb();
} while (panic_printing);
- monitor(NULL);
- if (pcpui->cur_proc) {
- printk("panic killing proc %d\n", pcpui->cur_proc->pid);
- proc_destroy(pcpui->cur_proc);
- }
/* Yikes! We're claiming to be not in IRQ/trap ctx and not holding any
* locks. Obviously we could be wrong, and could easily deadlock. We could
* be in an IRQ handler, an unhandled kernel fault, or just a 'normal' panic
* in a syscall - any of which can involve unrestored invariants. */
pcpui->__ctx_depth = 0;
pcpui->lock_depth = 0;
+ /* And keep this off, for good measure. */
+ pcpui->__lock_checking_enabled--;
+
+ monitor(NULL);
+
+ if (pcpui->cur_proc) {
+ printk("panic killing proc %d\n", pcpui->cur_proc->pid);
+ proc_destroy(pcpui->cur_proc);
+ }
if (pcpui->cur_kthread)
kth_panic_sysc(pcpui->cur_kthread);
smp_idle();
}
-/* like panic, but don't */
void _warn(const char *file, int line, const char *fmt,...)
{
va_list ap;
+ print_lock();
va_start(ap, fmt);
printk("kernel warning at %s:%d, from core %d: ", file, line,
core_id_early());
vcprintf(fmt, ap);
cprintf("\n");
va_end(ap);
+ print_unlock();
}
static void run_links(linker_func_t *linkstart, linker_func_t *linkend)
diff --git a/kern/src/printf.c b/kern/src/printf.c
index 6e54a04..7b415d9 100644
--- a/kern/src/printf.c
+++ b/kern/src/printf.c
@@ -11,7 +11,46 @@
#include <kprof.h>
#include <init.h>
-spinlock_t output_lock = SPINLOCK_INITIALIZER_IRQSAVE;
+/* Recursive lock. We'd like to avoid spreading these around the kernel. */
+static spinlock_t output_lock = SPINLOCK_INITIALIZER_IRQSAVE;
+static int output_lock_holder = -1; /* core_id. */
+static int output_lock_count;
+
+void print_lock(void)
+{
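+ /* Reading output_lock_holder without the lock is safe here: it only
+  * equals our core_id if we set it, i.e. we already hold the lock. */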
+ if (output_lock_holder == core_id_early()) {
+ output_lock_count++;
+ return;
+ }
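+ /* Disable the lock checker; we may be printing from a context it
+  * can't handle, such as a fault handler or panic. */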
+ pcpui_var(core_id_early(), __lock_checking_enabled)--;
+ spin_lock_irqsave(&output_lock);
+ output_lock_holder = core_id_early();
+ output_lock_count = 1;
+}
+
+void print_unlock(void)
+{
+ output_lock_count--;
+ if (output_lock_count)
+ return;
+ output_lock_holder = -1;
+ spin_unlock_irqsave(&output_lock);
+ pcpui_var(core_id_early(), __lock_checking_enabled)++;
+}
+
+/* Regardless of where we are, unlock. This is dangerous, and only used when
+ * you know you will never unwind your stack, such as for a panic. */
+void print_unlock_force(void)
+{
+ output_lock_holder = -1;
+ output_lock_count = 0;
+ spin_unlock_irqsave(&output_lock);
+ pcpui_var(core_id_early(), __lock_checking_enabled)++;
+}
void putch(int ch, int **cnt)
{
@@ -43,50 +82,25 @@
int vcprintf(const char *fmt, va_list ap)
{
- struct per_cpu_info *pcpui;
int cnt = 0;
int *cntp = &cnt;
volatile int i;
- int8_t irq_state = 0;
va_list args;
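+ /* Recursive, so a printk() from a fault mid-printk won't deadlock. */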
+ print_lock();
+
va_copy(args, ap);
trace_vprintk(fmt, args);
va_end(args);
- /* this ktrap depth stuff is in case the kernel faults in a printfmt call.
- * we disable the locking if we're in a fault handler so that we don't
- * deadlock. */
- if (booting)
- pcpui = &per_cpu_info[0];
- else
- pcpui = &per_cpu_info[core_id()];
- /* lock all output. this will catch any printfs at line granularity. when
- * tracing, we short-circuit the main lock call, so as not to clobber the
- * results as we print. */
- if (!ktrap_depth(pcpui)) {
- #ifdef CONFIG_TRACE_LOCKS
- disable_irqsave(&irq_state);
- __spin_lock(&output_lock);
- #else
- spin_lock_irqsave(&output_lock);
- #endif
- }
-
// do the buffered printf
vprintfmt((void*)buffered_putch, (void*)&cntp, fmt, ap);
// write out remaining chars in the buffer
buffered_putch(-1,&cntp);
- if (!ktrap_depth(pcpui)) {
- #ifdef CONFIG_TRACE_LOCKS
- __spin_unlock(&output_lock);
- enable_irqsave(&irq_state);
- #else
- spin_unlock_irqsave(&output_lock);
- #endif
- }
+ print_unlock();
return cnt;
}