vmm: Remove nasty rdmsr() macro (XCC)

This cleaned up the kernel's MSR code a bit.  The rdmsr() macro actually
cluttered up some of the code, especially in the "just read the host MSRs
and copy to the VMCS" section.

Also, the order of the parameters to rdmsr() was unclear.  In fact, we had
a bug in the user version of the code for emsr_ok (rdx was first, not
second).  Whoops.

We had another user bug in miscenable, where we didn't track PEBS properly.
I just removed that function completely, since it is never called.  Similarly
with emsr_mustmatch, both for the kernel and user.

Also note that the user MSR code will panic if you use it.  Previously,
you'd get a GPF if you tried an MSR instruction from userspace.  Instead of
just deleting it all, I left it there with a panic().  Someone else can use
the #arch device to access the MSRs if we feel it is useful.

Reinstall your kernel headers.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
diff --git a/kern/arch/x86/ros/vmx.h b/kern/arch/x86/ros/vmx.h
index e0a008e..4dfe24e 100644
--- a/kern/arch/x86/ros/vmx.h
+++ b/kern/arch/x86/ros/vmx.h
@@ -646,15 +646,6 @@
 
 typedef uint64_t gpa_t;
 typedef uint64_t gva_t;
-/* TODO: remove these nasty macros */
-#define rdmsrl(msr, val) (val) = read_msr((msr))
-#define rdmsr(msr, low, high)                                                  \
-do {                                                                           \
-	uint64_t m = read_msr(msr);                                                \
-	                                                                           \
-	low = m & 0xffffffff;                                                      \
-	high = m >> 32;                                                            \
-} while (0)
 
 struct vmx_capability {
 	uint32_t ept;
diff --git a/kern/arch/x86/vmm/intel/vmx.c b/kern/arch/x86/vmm/intel/vmx.c
index f656aeb..f21dadb 100644
--- a/kern/arch/x86/vmm/intel/vmx.c
+++ b/kern/arch/x86/vmm/intel/vmx.c
@@ -404,12 +404,15 @@
 {
 	bool err = false;
 	uint32_t vmx_msr_low, vmx_msr_high;
+	uint64_t msr_val;
 	uint32_t reserved_0, reserved_1, changeable_bits, try0, try1;
 
 	if (have_true_msr)
-		rdmsr(v->truemsr, vmx_msr_low, vmx_msr_high);
+		msr_val = read_msr(v->truemsr);
 	else
-		rdmsr(v->msr, vmx_msr_low, vmx_msr_high);
+		msr_val = read_msr(v->msr);
+	vmx_msr_high = high32(msr_val);
+	vmx_msr_low = low32(msr_val);
 
 	if (vmx_msr_low & ~vmx_msr_high)
 		warn("JACKPOT: Conflicting VMX ec ctls for %s, high 0x%08x low 0x%08x",
@@ -673,7 +676,9 @@
 
 	/* Read in the caps for runtime checks.  This MSR is only available if
 	 * secondary controls and ept or vpid is on, which we check earlier */
-	rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, vmx_capability.ept, vmx_capability.vpid);
+	vmx_msr = read_msr(MSR_IA32_VMX_EPT_VPID_CAP);
+	vmx_capability.vpid = high32(vmx_msr);
+	vmx_capability.ept = low32(vmx_msr);
 
 	*ret = 0;
 }
@@ -722,8 +727,6 @@
  */
 static void vmx_setup_constant_host_state(void)
 {
-	uint32_t low32, high32;
-	unsigned long tmpl;
 	pseudodesc_t dt;
 
 	vmcs_writel(HOST_CR0, rcr0() & ~X86_CR0_TS);	/* 22.2.3 */
@@ -742,18 +745,13 @@
 	extern void vmexit_handler(void);
 	vmcs_writel(HOST_RIP, (unsigned long)vmexit_handler);
 
-	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
-	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
-	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);	/* 22.2.3 */
+	vmcs_write32(HOST_IA32_SYSENTER_CS, read_msr(MSR_IA32_SYSENTER_CS));
+	vmcs_writel(HOST_IA32_SYSENTER_EIP, read_msr(MSR_IA32_SYSENTER_EIP));
 
-	rdmsr(MSR_EFER, low32, high32);
-	vmcs_write32(HOST_IA32_EFER, low32);
+	vmcs_write32(HOST_IA32_EFER, read_msr(MSR_EFER));
 
-	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
-		rdmsr(MSR_IA32_CR_PAT, low32, high32);
-		vmcs_write64(HOST_IA32_PAT, low32 | ((uint64_t) high32 << 32));
-	}
+	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT)
+		vmcs_write64(HOST_IA32_PAT, read_msr(MSR_IA32_CR_PAT));
 
 	vmcs_write16(HOST_FS_SELECTOR, 0);	/* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, 0);	/* 22.2.4 */
@@ -1131,7 +1129,7 @@
 		return -EBUSY;
 	}
 
-	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
+	old = read_msr(MSR_IA32_FEATURE_CONTROL);
 
 	test_bits = FEATURE_CONTROL_LOCKED;
 	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
diff --git a/user/vmm/vmxmsr.c b/user/vmm/vmxmsr.c
index 48aa1fb..39ee1ce 100644
--- a/user/vmm/vmxmsr.c
+++ b/user/vmm/vmxmsr.c
@@ -38,25 +38,17 @@
 	bool written;
 	uint32_t edx, eax;
 };
-// Might need to mfence rdmsr.  supposedly wrmsr serializes, but not for x2APIC
+
 static inline uint64_t read_msr(uint32_t reg)
 {
-	uint32_t edx, eax;
-	asm volatile("rdmsr; mfence" : "=d"(edx), "=a"(eax) : "c"(reg));
-	return (uint64_t)edx << 32 | eax;
+	panic("Not implemented for userspace");
 }
 
 static inline void write_msr(uint32_t reg, uint64_t val)
 {
-	asm volatile("wrmsr" : : "d"((uint32_t)(val >> 32)),
-	                         "a"((uint32_t)(val & 0xFFFFFFFF)),
-	                         "c"(reg));
+	panic("Not implemented for userspace");
 }
 
-static int emsr_miscenable(struct guest_thread *vm_thread, struct emmsr *,
-                           uint32_t);
-static int emsr_mustmatch(struct guest_thread *vm_thread, struct emmsr *,
-                          uint32_t);
 static int emsr_readonly(struct guest_thread *vm_thread, struct emmsr *,
                          uint32_t);
 static int emsr_readzero(struct guest_thread *vm_thread, struct emmsr *,
@@ -69,84 +61,29 @@
 	{MSR_RAPL_POWER_UNIT, "MSR_RAPL_POWER_UNIT", emsr_readzero},
 };
 
-static uint64_t set_low32(uint64_t hi, uint32_t lo)
+static inline uint32_t low32(uint64_t val)
 {
-	return (hi & 0xffffffff00000000ULL) | lo;
+	return val & 0xffffffff;
 }
 
-static uint64_t set_low16(uint64_t hi, uint16_t lo)
+static inline uint32_t high32(uint64_t val)
 {
-	return (hi & 0xffffffffffff0000ULL) | lo;
-}
-
-static uint64_t set_low8(uint64_t hi, uint8_t lo)
-{
-	return (hi & 0xffffffffffffff00ULL) | lo;
-}
-
-/* this may be the only register that needs special handling.
- * If there others then we might want to extend teh emmsr struct.
- */
-static int emsr_miscenable(struct guest_thread *vm_thread, struct emmsr *msr,
-                           uint32_t opcode) {
-	uint32_t eax, edx;
-	struct vm_trapframe *vm_tf = &(vm_thread->uthread.u_ctx.tf.vm_tf);
-
-	rdmsr(msr->reg, eax, edx);
-	/* we just let them read the misc msr for now. */
-	if (opcode == EXIT_REASON_MSR_READ) {
-		vm_tf->tf_rax = set_low32(vm_tf->tf_rax, eax);
-		vm_tf->tf_rax |= MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
-		vm_tf->tf_rdx = set_low32(vm_tf->tf_rdx, edx);
-		return 0;
-	} else {
-		/* if they are writing what is already written, that's ok. */
-		if (((uint32_t) vm_tf->tf_rax == eax)
-		    && ((uint32_t) vm_tf->tf_rdx == edx))
-			return 0;
-	}
-	fprintf(stderr,
-		"%s: Wanted to write 0x%x:0x%x, but could not; value was 0x%x:0x%x\n",
-		 msr->name, (uint32_t) vm_tf->tf_rdx,
-		 (uint32_t) vm_tf->tf_rax, edx, eax);
-	return SHUTDOWN_UNHANDLED_EXIT_REASON;
-}
-
-static int emsr_mustmatch(struct guest_thread *vm_thread, struct emmsr *msr,
-                          uint32_t opcode) {
-	uint32_t eax, edx;
-	struct vm_trapframe *vm_tf = &(vm_thread->uthread.u_ctx.tf.vm_tf);
-
-	rdmsr(msr->reg, eax, edx);
-	/* we just let them read the misc msr for now. */
-	if (opcode == EXIT_REASON_MSR_READ) {
-		vm_tf->tf_rax = set_low32(vm_tf->tf_rax, eax);
-		vm_tf->tf_rdx = set_low32(vm_tf->tf_rdx, edx);
-		return 0;
-	} else {
-		/* if they are writing what is already written, that's ok. */
-		if (((uint32_t) vm_tf->tf_rax == eax)
-		    && ((uint32_t) vm_tf->tf_rdx == edx))
-			return 0;
-	}
-	fprintf(stderr,
-		"%s: Wanted to write 0x%x:0x%x, but could not; value was 0x%x:0x%x\n",
-		 msr->name, (uint32_t) vm_tf->tf_rdx,
-		 (uint32_t) vm_tf->tf_rax, edx, eax);
-	return SHUTDOWN_UNHANDLED_EXIT_REASON;
+	return val >> 32;
 }
 
 static int emsr_ok(struct guest_thread *vm_thread, struct emmsr *msr,
                    uint32_t opcode)
 {
 	struct vm_trapframe *vm_tf = &(vm_thread->uthread.u_ctx.tf.vm_tf);
+	uint64_t msr_val;
 
 	if (opcode == EXIT_REASON_MSR_READ) {
-		rdmsr(msr->reg, vm_tf->tf_rdx, vm_tf->tf_rax);
+		msr_val = read_msr(msr->reg);
+		vm_tf->tf_rax = low32(msr_val);
+		vm_tf->tf_rdx = high32(msr_val);
 	} else {
-		uint64_t val =
-			(uint64_t) vm_tf->tf_rdx << 32 | vm_tf->tf_rax;
-		write_msr(msr->reg, val);
+		msr_val = (vm_tf->tf_rdx << 32) | vm_tf->tf_rax;
+		write_msr(msr->reg, msr_val);
 	}
 	return 0;
 }
@@ -154,14 +91,13 @@
 static int emsr_readonly(struct guest_thread *vm_thread, struct emmsr *msr,
                          uint32_t opcode)
 {
-	uint32_t eax, edx;
 	struct vm_trapframe *vm_tf = &(vm_thread->uthread.u_ctx.tf.vm_tf);
+	uint64_t msr_val;
 
-	rdmsr((uint32_t) vm_tf->tf_rcx, eax, edx);
-	/* we just let them read the misc msr for now. */
+	msr_val = read_msr(msr->reg);
 	if (opcode == EXIT_REASON_MSR_READ) {
-		vm_tf->tf_rax = set_low32(vm_tf->tf_rax, eax);
-		vm_tf->tf_rdx = set_low32(vm_tf->tf_rdx, edx);
+		vm_tf->tf_rax = low32(msr_val);
+		vm_tf->tf_rdx = high32(msr_val);
 		return 0;
 	}
 
@@ -189,24 +125,22 @@
                           uint32_t opcode)
 {
 	uint32_t eax, edx;
+	uint64_t msr_val;
 	struct vm_trapframe *vm_tf = &(vm_thread->uthread.u_ctx.tf.vm_tf);
 
 	if (!msr->written) {
-		rdmsr(msr->reg, eax, edx);
+		msr_val = read_msr(msr->reg);
+		eax = low32(msr_val);
+		edx = high32(msr_val);
 	} else {
-		edx = msr->edx;
 		eax = msr->eax;
+		edx = msr->edx;
 	}
-	/* we just let them read the misc msr for now. */
 	if (opcode == EXIT_REASON_MSR_READ) {
-		vm_tf->tf_rax = set_low32(vm_tf->tf_rax, eax);
-		vm_tf->tf_rdx = set_low32(vm_tf->tf_rdx, edx);
+		vm_tf->tf_rax = eax;
+		vm_tf->tf_rdx = edx;
 		return 0;
 	} else {
-		/* if they are writing what is already written, that's ok. */
-		if (((uint32_t) vm_tf->tf_rax == eax)
-		    && ((uint32_t) vm_tf->tf_rdx == edx))
-			return 0;
 		msr->edx = vm_tf->tf_rdx;
 		msr->eax = vm_tf->tf_rax;
 		msr->written = true;