kvm.c

/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
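
/*
 * Tasks stalled on an async page fault are parked in a small hash table,
 * keyed by the token the host delivers in CR2, until the matching
 * PAGE_READY notification arrives.
 */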
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
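
/*
 * Park the current task until the host reports the page is ready.  A
 * schedulable context sleeps on a swait queue; contexts that must not
 * schedule (idle task, preemption disabled, RCU read-side critical
 * sections) halt with the node's halted flag set and are kicked awake
 * via a reschedule IPI instead.
 */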
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1 ||
		   rcu_preempt_depth();
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
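
/*
 * Wake the task sleeping on @token (a token of ~0 broadcasts to every
 * sleeper on this CPU).  If the wakeup beats the corresponding fault,
 * queue a dummy node so the later waiter finds it and returns without
 * sleeping.
 */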
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
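
/*
 * Async-PF-aware page-fault entry.  The reason word left by the host
 * disambiguates: 0 means a normal page fault, PAGE_NOT_PRESENT parks the
 * faulting task, and PAGE_READY wakes the sleeper whose token arrives in
 * CR2 instead of a fault address.
 */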
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
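
/*
 * Publish this CPU's steal-time area: MSR_KVM_STEAL_TIME takes the
 * buffer's physical address with KVM_MSR_ENABLED set in the low bits.
 */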
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}
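
/*
 * Per-CPU feature enablement: each shared area (async PF reason word,
 * PV EOI flag, steal time) is handed to the host by writing its physical
 * address, with the enable bit set, into the matching KVM MSR.
 */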
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
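
/*
 * The host updates the steal-time area under a seqcount-style version:
 * the version is odd while an update is in progress, so re-read until
 * two even, matching version values bracket the steal value.
 */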
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif
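
/*
 * When async PF is available, the page-fault gate (vector 14) is pointed
 * at the async-PF-aware entry so the reason word can be inspected first.
 */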
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}
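
/*
 * KVM identifies itself through the CPUID hypervisor leaves: when the
 * HYPERVISOR feature bit is set, hypervisor_cpuid_base() scans the
 * 0x40000000 range for the "KVMKVMKVM" signature.  The result is probed
 * once and cached by kvm_cpuid_base().
 */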
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>
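
/*
 * The "wait" half of the paravirt qspinlock pair: halt this vCPU until
 * the byte at *ptr no longer holds val, relying on the lock holder's
 * kick (or any interrupt) to resume execution.
 */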
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */