Advertisement
xosski

Vmx/hypervisor/vmm exploitation fun

Jan 9th, 2025
8
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 5.83 KB | None | 0 0
  1. #include <linux/cpumask.h>
  2. #include <linux/smp.h>
  3. #include <linux/slab.h>
  4. #include <linux/version.h>
  5. #if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0)
  6. #include <linux/cpu.h>
  7. #include <linux/notifier.h>
  8. #else
  9. #include <linux/cpuhotplug.h>
  10. #endif
  11. #include <asm/special_insns.h>
  12. #include <asm/cpufeature.h>
  13.  
  14. #include "cpu.h"
  15. #include "crx.h"
  16. #include "msr.h"
  17. #include "mem.h"
  18. #include "asm.h"
  19. #include "enc.h"
  20.  
/* Debug print helper: tags every message with the executing logical CPU id. */
#define VCPU_DBG(fmt, ...) \
pr_debug("vcpu [cpu %02d] - " fmt, cur_logical_cpu(), ##__VA_ARGS__)

/* Kernels <= 4.8.0 only have the old cpu_notifier API; newer kernels use
 * the cpuhp state machine (linux/cpuhotplug.h). */
#define OLD_CPUHOTPLUG (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
  25.  
  26. // Callback for CPU online
  27. static int cpu_on_cb(unsigned int cpu)
  28. {
  29. // Placeholder for vCPU virtualization
  30. return 0;
  31. }
  32.  
  33. // Callback for CPU offline
  34. static int cpu_off_cb(unsigned int cpu)
  35. {
  36. // Placeholder for restoring vCPU and cleaning up data
  37. return 0;
  38. }
  39.  
  40. #if OLD_CPUHOTPLUG
  41. static int cpu_hotplug_cb(struct notifier_block *nblock,
  42. unsigned long action,
  43. void *hcpu)
  44. {
  45. unsigned int cpu = (unsigned int)(unsigned long)hcpu;
  46.  
  47. switch (action) {
  48. case CPU_ONLINE:
  49. return cpu_on_cb(cpu);
  50. case CPU_OFFLINE:
  51. return cpu_off_cb(cpu);
  52. default:
  53. return NOTIFY_DONE;
  54. }
  55. }
  56.  
  57. static struct notifier_block cpu_hotplug_notifier = {
  58. .notifier_call = cpu_hotplug_cb,
  59. };
  60. #endif
  61.  
  62. int cpu_hotplug_register(void)
  63. {
  64. int ret = 1;
  65.  
  66. #if OLD_CPUHOTPLUG
  67. register_cpu_notifier(&cpu_hotplug_notifier);
  68. #else
  69. ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
  70. "vcpu:online",
  71. cpu_on_cb,
  72. cpu_off_cb);
  73. if (ret < 0) {
  74. pr_err("Failed to register CPU hotplug callback: %d\n", ret);
  75. return 0;
  76. }
  77. #endif
  78.  
  79. VCPU_DBG("Registered CPU hotplug callback");
  80. return ret;
  81. }
  82.  
  83. void cpu_hotplug_unregister(void)
  84. {
  85. #if OLD_CPUHOTPLUG
  86. unregister_cpu_notifier(&cpu_hotplug_notifier);
  87. #else
  88. cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
  89. #endif
  90.  
  91. VCPU_DBG("Unregistered CPU hotplug callback");
  92. }
  93.  
  94. int max_logical_cpu(void)
  95. {
  96. return num_online_cpus();
  97. }
  98.  
  99. int cur_logical_cpu(void)
  100. {
  101. return smp_processor_id();
  102. }
  103.  
  104. static inline bool vcpu_supports_vmx(void)
  105. {
  106. return this_cpu_has(X86_FEATURE_VMX);
  107. }
  108.  
  109. static int vcpu_set_feature_control(void)
  110. {
  111. ia32_feature_control_t feat_ctl = {0};
  112.  
  113. feat_ctl.value = __rdmsr(IA32_FEATURE_CONTROL_MSR);
  114.  
  115. if (feat_ctl.fields.locked) {
  116. if (!feat_ctl.fields.vmx_outside_smx) {
  117. VCPU_DBG("VMX operation disabled by BIOS/UEFI firmware");
  118. return -EINVAL;
  119. }
  120. VCPU_DBG("VMX operation already enabled");
  121. return 0;
  122. }
  123.  
  124. feat_ctl.fields.locked = 1;
  125. feat_ctl.fields.vmx_outside_smx = 1;
  126. __wrmsr(IA32_FEATURE_CONTROL_MSR, (feat_ctl.value >> 32), feat_ctl.value);
  127.  
  128. feat_ctl.value = __rdmsr(IA32_FEATURE_CONTROL_MSR);
  129.  
  130. if (!feat_ctl.fields.locked || !feat_ctl.fields.vmx_outside_smx) {
  131. VCPU_DBG("Failed to enable VMX operation");
  132. return -EIO;
  133. }
  134.  
  135. return 0;
  136. }
  137.  
/*
 * Prepare CR0/CR4 for VMX operation on the executing CPU.
 *
 * Sets CR4.VMXE, then conforms CR0 and CR4 to the VMX fixed-bit MSRs:
 * bits set in IA32_VMX_CRx_FIXED0 must be 1, bits clear in
 * IA32_VMX_CRx_FIXED1 must be 0 (Intel SDM "restrictions on VMX
 * operation"). CR4 is re-read after the VMXE write so the fixed-bit
 * adjustment operates on the current value.
 *
 * Only the low 32 bits of each FIXED MSR are applied (split.low) —
 * presumably the high halves are all-ones/zeros as on current CPUs;
 * NOTE(review): confirm this matches the ia32_gen_fixed_t layout in msr.h.
 */
static void vcpu_set_cr_fixed(void)
{
	cr0_t cr0 = {0};
	cr4_t cr4 = {0};
	ia32_gen_fixed_t fixed = {0};

	/* CR4.VMXE must be set before VMXON is attempted. */
	cr4.value = __read_cr4();
	cr4.flags.VMXE = 1;
	__write_cr4(cr4.value);

	/* CR0: OR in the must-be-1 bits, AND with the may-be-1 mask. */
	cr0.value = __read_cr0();
	fixed.value = __rdmsr(IA32_VMX_CR0_FIXED0_MSR);
	cr0.value |= fixed.split.low;
	fixed.value = __rdmsr(IA32_VMX_CR0_FIXED1_MSR);
	cr0.value &= fixed.split.low;
	__write_cr0(cr0.value);

	/* CR4: same fixed-bit adjustment. */
	cr4.value = __read_cr4();
	fixed.value = __rdmsr(IA32_VMX_CR4_FIXED0_MSR);
	cr4.value |= fixed.split.low;
	fixed.value = __rdmsr(IA32_VMX_CR4_FIXED1_MSR);
	cr4.value &= fixed.split.low;
	__write_cr4(cr4.value);
}
  162.  
  163. vcpu_ctx_t *vcpu_alloc(void)
  164. {
  165. vcpu_ctx_t *vcpu_ctx = kmalloc(sizeof(*vcpu_ctx), GFP_KERNEL);
  166.  
  167. if (!vcpu_ctx) {
  168. pr_err("Failed to allocate vCPU context\n");
  169. return NULL;
  170. }
  171.  
  172. vcpu_ctx->vmxon_region = mem_alloc_pages(0);
  173. if (!vcpu_ctx->vmxon_region) {
  174. pr_err("Failed to allocate VMXON region\n");
  175. kfree(vcpu_ctx);
  176. return NULL;
  177. }
  178. vcpu_ctx->vmxon_physical = mem_virt_to_phys(vcpu_ctx->vmxon_region);
  179.  
  180. vcpu_ctx->vmcs_region = mem_alloc_pages(0);
  181. if (!vcpu_ctx->vmcs_region) {
  182. pr_err("Failed to allocate VMCS region\n");
  183. mem_free_pages((unsigned long)vcpu_ctx->vmxon_region, 0);
  184. kfree(vcpu_ctx);
  185. return NULL;
  186. }
  187. vcpu_ctx->vmcs_physical = mem_virt_to_phys(vcpu_ctx->vmcs_region);
  188.  
  189. ia32_vmx_basic_t basic = {0};
  190. basic.value = __rdmsr(IA32_VMX_BASIC_MSR);
  191. vcpu_ctx->vmxon_region->reserved.rev_ident = basic.fields.vmcs_rev_ident;
  192. vcpu_ctx->vmcs_region->reserved.rev_ident = basic.fields.vmcs_rev_ident;
  193.  
  194. return vcpu_ctx;
  195. }
  196.  
  197. void vcpu_free(vcpu_ctx_t *vcpu_ctx)
  198. {
  199. if (!vcpu_ctx)
  200. return;
  201.  
  202. if (vcpu_ctx->vmxon_region)
  203. mem_free_pages((unsigned long)vcpu_ctx->vmxon_region, 0);
  204.  
  205. if (vcpu_ctx->vmcs_region)
  206. mem_free_pages((unsigned long)vcpu_ctx->vmcs_region, 0);
  207.  
  208. kfree(vcpu_ctx);
  209. }
  210.  
/*
 * Per-CPU VMX entry point. Presumably invoked on every logical CPU via
 * an SMP cross-call (on_each_cpu-style — confirm against the caller,
 * since cur_logical_cpu() indexes vcpu_ctxs and must match the CPU the
 * context was allocated for).
 *
 * Checks VMX support, programs IA32_FEATURE_CONTROL, applies the
 * CR0/CR4 fixed-bit constraints, then executes VMXON with this CPU's
 * preallocated region. Failures are logged and leave the CPU out of
 * VMX operation; no status is reported back to the caller.
 *
 * @info: vmm_ctx_t * holding the per-CPU vcpu_ctxs array.
 */
void vcpu_init(void *info)
{
	vmm_ctx_t *vmm_ctx = info;
	int cpu = cur_logical_cpu();

	if (!vcpu_supports_vmx()) {
		VCPU_DBG("VMX not supported");
		return;
	}

	if (vcpu_set_feature_control()) {
		VCPU_DBG("Failed to set IA32_FEATURE_CONTROL MSR");
		return;
	}

	vcpu_set_cr_fixed();

	/* NOTE(review): cpu is assumed to be a valid index into vcpu_ctxs
	 * (sized by max_logical_cpu()); no bounds check is performed. */
	vcpu_ctx_t *vcpu_ctx = vmm_ctx->vcpu_ctxs[cpu];

	if (__vmx_on(vcpu_ctx->vmxon_physical)) {
		VCPU_DBG("VMXON failed");
		return;
	}

	VCPU_DBG("VMXON successful");
}
  237.  
  238. void vcpu_restore(void *info)
  239. {
  240. __vmx_off();
  241. VCPU_DBG("VMXOFF executed");
  242. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement