@@ -2906,4 +2906,147 @@ mod tests {
         }
         assert!(vcpu.vcpu_init(&kvi).is_ok());
     }
+
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    #[test]
+    fn test_userspace_rdmsr_exit() {
+        use std::io::Write;
+
+        let kvm = Kvm::new().unwrap();
+        let vm = kvm.create_vm().unwrap();
+        #[rustfmt::skip]
+        let code = [
+            0x0F, 0x32, /* rdmsr */
+            0xF4        /* hlt */
+        ];
+
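+        // Skip the test when the host kernel cannot forward MSR accesses
+        // to userspace.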
+        if !vm.check_extension(Cap::X86UserSpaceMsr) {
+            return;
+        }
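+        // Ask KVM to exit to userspace on accesses to MSRs it does not
+        // handle itself, rather than rejecting them in the guest.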
+        let cap = kvm_enable_cap {
+            cap: Cap::X86UserSpaceMsr as u32,
+            args: [MsrExitReason::Unknown.bits() as u64, 0, 0, 0],
+            ..Default::default()
+        };
+        vm.enable_cap(&cap).unwrap();
+
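+        // Back the guest with 16 KiB of anonymous host memory exposed at
+        // guest physical address 0x1000; the code is copied there below.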
+        let mem_size = 0x4000;
+        let load_addr = mmap_anonymous(mem_size);
+        let guest_addr: u64 = 0x1000;
+        let slot: u32 = 0;
+        let mem_region = kvm_userspace_memory_region {
+            slot,
+            guest_phys_addr: guest_addr,
+            memory_size: mem_size as u64,
+            userspace_addr: load_addr as u64,
+            flags: 0,
+        };
+        unsafe {
+            vm.set_user_memory_region(mem_region).unwrap();
+
+            // Get a mutable slice of `mem_size` from `load_addr`.
+            // This is safe because we mapped it before.
+            let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size);
+            slice.write_all(&code).unwrap();
+        }
+
+        let vcpu = vm.create_vcpu(0).unwrap();
+
+        // Set up special registers
+        let mut vcpu_sregs = vcpu.get_sregs().unwrap();
+        assert_ne!(vcpu_sregs.cs.base, 0);
+        assert_ne!(vcpu_sregs.cs.selector, 0);
+        vcpu_sregs.cs.base = 0;
+        vcpu_sregs.cs.selector = 0;
+        vcpu.set_sregs(&vcpu_sregs).unwrap();
+
+        // Set the Instruction Pointer to the guest address where we loaded
+        // the code, and RCX to the MSR to be read.
+        let mut vcpu_regs = vcpu.get_regs().unwrap();
+        vcpu_regs.rip = guest_addr;
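+        // 0x474f4f00 is an arbitrary index KVM has no handler for, so the
+        // read is forwarded to userspace with reason `Unknown`.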
+        vcpu_regs.rcx = 0x474f4f00;
+        vcpu.set_regs(&vcpu_regs).unwrap();
+
+        match vcpu.run().unwrap() {
+            VcpuExit::X86Rdmsr(exit) => {
+                assert_eq!(exit.reason, MsrExitReason::Unknown);
+                assert_eq!(exit.index, 0x474f4f00);
+            }
+            e => panic!("Unexpected exit: {:?}", e),
+        }
+    }
+
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    #[test]
+    fn test_userspace_wrmsr_exit() {
+        use std::io::Write;
+
+        let kvm = Kvm::new().unwrap();
+        let vm = kvm.create_vm().unwrap();
+        #[rustfmt::skip]
+        let code = [
+            0x0F, 0x30, /* wrmsr */
+            0xF4        /* hlt */
+        ];
+
+        if !vm.check_extension(Cap::X86UserSpaceMsr) {
+            return;
+        }
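+        // As in the rdmsr test, exit to userspace on accesses to MSRs that
+        // KVM does not know about.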
+        let cap = kvm_enable_cap {
+            cap: Cap::X86UserSpaceMsr as u32,
+            args: [MsrExitReason::Unknown.bits() as u64, 0, 0, 0],
+            ..Default::default()
+        };
+        vm.enable_cap(&cap).unwrap();
+
+        let mem_size = 0x4000;
+        let load_addr = mmap_anonymous(mem_size);
+        let guest_addr: u64 = 0x1000;
+        let slot: u32 = 0;
+        let mem_region = kvm_userspace_memory_region {
+            slot,
+            guest_phys_addr: guest_addr,
+            memory_size: mem_size as u64,
+            userspace_addr: load_addr as u64,
+            flags: 0,
+        };
+        unsafe {
+            vm.set_user_memory_region(mem_region).unwrap();
+
+            // Get a mutable slice of `mem_size` from `load_addr`.
+            // This is safe because we mapped it before.
+            let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size);
+            slice.write_all(&code).unwrap();
+        }
+
+        let vcpu = vm.create_vcpu(0).unwrap();
+
+        // Set up special registers
+        let mut vcpu_sregs = vcpu.get_sregs().unwrap();
+        assert_ne!(vcpu_sregs.cs.base, 0);
+        assert_ne!(vcpu_sregs.cs.selector, 0);
+        vcpu_sregs.cs.base = 0;
+        vcpu_sregs.cs.selector = 0;
+        vcpu.set_sregs(&vcpu_sregs).unwrap();
+
+        // Set the Instruction Pointer to the guest address where we loaded
+        // the code, RCX to the MSR to be written, and EDX:EAX to the data to
+        // be written.
+        let mut vcpu_regs = vcpu.get_regs().unwrap();
+        vcpu_regs.rip = guest_addr;
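+        // The same unhandled MSR index; EDX:EAX hold the 64-bit value whose
+        // halves should be reassembled into the exit's `data` field.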
+        vcpu_regs.rcx = 0x474f4f00;
+        vcpu_regs.rax = 0xdeadbeef;
+        vcpu_regs.rdx = 0xd0c0ffee;
+        vcpu.set_regs(&vcpu_regs).unwrap();
+
+        match vcpu.run().unwrap() {
+            VcpuExit::X86Wrmsr(exit) => {
+                assert_eq!(exit.reason, MsrExitReason::Unknown);
+                assert_eq!(exit.index, 0x474f4f00);
+                assert_eq!(exit.data & 0xffffffff, 0xdeadbeef);
+                assert_eq!((exit.data >> 32) & 0xffffffff, 0xd0c0ffee);
+            }
+            e => panic!("Unexpected exit: {:?}", e),
+        }
+    }
 }