vmbus_drv.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"
static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
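
/*
 * Editor's illustrative note (not part of the original source): the monitor
 * ID splits into a trigger-group index and a bit offset within that group.
 * For example, a channel with offermsg.monitorid == 37 falls in group
 * 37 / 32 == 1 at offset 37 % 32 == 5, i.e. bit 5 of trigger_group[1] in
 * the shared monitor page used by the helpers below.
 */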
static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);
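
/*
 * Editor's illustrative note (not part of the original source): once this
 * group is wired up via hv_bus.dev_groups below, each attribute appears as
 * a read-only file in the device's sysfs directory, which is named after
 * the instance GUID assigned in vmbus_device_register(). For example
 * (path shown only as an illustration):
 *
 *	cat /sys/bus/vmbus/devices/<instance-guid>/channel_vp_mapping
 */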
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
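
/*
 * Editor's illustrative note (not part of the original source): since
 * print_alias_name() renders each of the 16 bytes of the type GUID as two
 * hex digits, the uevent variable added above has the shape
 *
 *	MODALIAS=vmbus:<32 hex digits>
 *
 * which udev matches against the "vmbus:*" aliases generated from each
 * driver's MODULE_DEVICE_TABLE(vmbus, ...) entries.
 */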
static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const uuid_le *guid)
{
	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(hv_context.msg_dpc[cpu]);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup(false);

	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
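
/*
 * Editor's illustrative sketch (not part of the original file): callers
 * normally reach this through the vmbus_driver_register() wrapper macro in
 * <linux/hyperv.h>, which supplies THIS_MODULE and KBUILD_MODNAME. All
 * "example_*" names below are hypothetical placeholders:
 *
 *	static const struct hv_vmbus_device_id example_id_table[] = {
 *		{ .guid = ... },	// device-type GUID to claim
 *		{ },			// null GUID terminates the table
 *	};
 *	MODULE_DEVICE_TABLE(vmbus, example_id_table);
 *
 *	static struct hv_driver example_drv = {
 *		.name     = "example",
 *		.id_table = example_id_table,
 *		.probe    = example_probe,
 *		.remove   = example_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&example_drv);	// module init
 *	vmbus_driver_unregister(&example_drv);		// module exit
 */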
/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBus is an ACPI-enumerated device. Get the information we
 * need from the DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;
	}

	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
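
/*
 * Editor's illustrative note (not part of the original source): given two
 * adjacent bus windows reported by _CRS, say [0xf8000000, 0xfbffffff] and
 * then [0xfc000000, 0xffffffff], the loop above notices that the first
 * window's end + 1 equals the second window's start and coalesces them
 * into one "hyperv mmio" resource spanning [0xf8000000, 0xffffffff].
 */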
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */
	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region.  Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size,
							    dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size,
							    dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
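
/*
 * Editor's illustrative sketch (not part of the original file): a caller
 * such as a synthetic framebuffer or PCI front-end driver might request
 * 1MB of 1MB-aligned space anywhere in guest physical address space
 * (all values and the "hdev" name below are hypothetical):
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1,
 *				  0x100000, 0x100000, false);
 *	if (ret)
 *		return ret;
 *	// map and use res->start .. res->end, then on teardown:
 *	vmbus_free_mmio(res->start, resource_size(res));
 */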
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
	return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);
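
/*
 * Editor's illustrative note (not part of the original file): a caller
 * preparing a hypercall that targets a specific processor would translate
 * the Linux CPU id first, e.g.:
 *
 *	int vp = vmbus_cpu_number_to_vp_number(smp_processor_id());
 */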
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};
static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup(false);
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup(true);
}
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu)
		tasklet_kill(hv_context.msg_dpc[cpu]);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup(false);
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);