mtty.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508
  1. /*
  2. * Mediated virtual PCI serial host device driver
  3. *
  4. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
  5. * Author: Neo Jia <cjia@nvidia.com>
  6. * Kirti Wankhede <kwankhede@nvidia.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * Sample driver that creates mdev device that simulates serial port over PCI
  13. * card.
  14. *
  15. */
  16. #include <linux/init.h>
  17. #include <linux/module.h>
  18. #include <linux/device.h>
  19. #include <linux/kernel.h>
  20. #include <linux/fs.h>
  21. #include <linux/poll.h>
  22. #include <linux/slab.h>
  23. #include <linux/cdev.h>
  24. #include <linux/sched.h>
  25. #include <linux/wait.h>
  26. #include <linux/uuid.h>
  27. #include <linux/vfio.h>
  28. #include <linux/iommu.h>
  29. #include <linux/sysfs.h>
  30. #include <linux/ctype.h>
  31. #include <linux/file.h>
  32. #include <linux/mdev.h>
  33. #include <linux/pci.h>
  34. #include <linux/serial.h>
  35. #include <uapi/linux/serial_reg.h>
  36. #include <linux/eventfd.h>
  37. /*
  38. * #defines
  39. */
  40. #define VERSION_STRING "0.1"
  41. #define DRIVER_AUTHOR "NVIDIA Corporation"
  42. #define MTTY_CLASS_NAME "mtty"
  43. #define MTTY_NAME "mtty"
  44. #define MTTY_STRING_LEN 16
  45. #define MTTY_CONFIG_SPACE_SIZE 0xff
  46. #define MTTY_IO_BAR_SIZE 0x8
  47. #define MTTY_MMIO_BAR_SIZE 0x100000
  48. #define STORE_LE16(addr, val) (*(u16 *)addr = val)
  49. #define STORE_LE32(addr, val) (*(u32 *)addr = val)
  50. #define MAX_FIFO_SIZE 16
  51. #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
  52. #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
  53. #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
  54. #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
  55. ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
  56. #define MTTY_VFIO_PCI_OFFSET_MASK \
  57. (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
  58. #define MAX_MTTYS 24
/*
 * Global Structures
 */

/* Driver-global char-device bookkeeping; single instance: mtty_dev */
struct mtty_dev {
	dev_t		vd_devt;	/* allocated char device region */
	struct class	*vd_class;	/* sysfs class for the driver */
	struct cdev	vd_cdev;	/* char device backed by vd_fops */
	struct idr	vd_idr;		/* id allocator for mdev instances */
	struct device	dev;
} mtty_dev;

/* Per-region (BAR) mapping info cached from the virtual config space */
struct mdev_region_info {
	u64 start;		/* base address programmed into the BAR */
	u64 phys_start;		/* presumably a physical base — not used in the visible code */
	u32 size;		/* region size in bytes; 0 => not implemented */
	u64 vfio_offset;	/* offset of this region within the vfio device fd */
};
#if defined(DEBUG_REGS)
/* UART register names indexed by register offset — write-path tracing */
const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

/* UART register names indexed by register offset — read-path tracing */
const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif
/* loop back buffer */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];	/* circular data buffer */
	u8 head, tail;		/* head = next write slot, tail = next read slot */
	u8 count;		/* bytes currently queued */
};

/* Emulated 16550-style state for one serial port */
struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;		/* divisor latch access bit (set via LCR) */
	bool overrun;		/* FIFO overrun pending, reported via LSR */
	u16 divisor;		/* baud divisor latched while dlab is set */
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;	/* 1 when FIFO disabled, MAX_FIFO_SIZE otherwise */
	u8 intr_trigger_level;	/* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	int irq_fd;			/* eventfd fd last registered via SET_IRQS */
	struct eventfd_ctx *intx_evtfd;	/* INTx signalling context */
	struct eventfd_ctx *msi_evtfd;	/* MSI signalling context */
	int irq_index;			/* active VFIO irq index; -1 = none */
	u8 *vconfig;			/* virtual PCI config space buffer */
	struct mutex ops_lock;		/* serializes region accesses/ioctls */
	struct mdev_device *mdev;	/* owning mdev device */
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];	/* BAR sizing masks */
	struct list_head next;		/* link in mdev_devices_list */
	struct serial_port s[2];	/* one or two emulated ports */
	struct mutex rxtx_lock;		/* protects the per-port FIFO state */
	struct vfio_device_info dev_info;
	int nr_ports;			/* number of emulated ports (1 or 2) */
};

/* Global list of all created mdev states, guarded by mdev_list_lock */
struct mutex mdev_list_lock;
struct list_head mdev_devices_list;

/* fops for the driver's char device; no methods beyond ownership needed */
static const struct file_operations vd_fops = {
	.owner          = THIS_MODULE,
};
  135. /* function prototypes */
  136. static int mtty_trigger_interrupt(uuid_le uuid);
  137. /* Helper functions */
  138. static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
  139. {
  140. struct mdev_state *mds;
  141. list_for_each_entry(mds, &mdev_devices_list, next) {
  142. if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
  143. return mds;
  144. }
  145. return NULL;
  146. }
  147. void dump_buffer(char *buf, uint32_t count)
  148. {
  149. #if defined(DEBUG)
  150. int i;
  151. pr_info("Buffer:\n");
  152. for (i = 0; i < count; i++) {
  153. pr_info("%2x ", *(buf + i));
  154. if ((i + 1) % 16 == 0)
  155. pr_info("\n");
  156. }
  157. #endif
  158. }
/*
 * Populate the virtual PCI configuration space of a freshly created
 * mdev: device IDs, class code, one IO BAR per emulated port, and
 * vendor-specific bytes.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID: vendor 0x4348, device 0x3253 (little-endian dword) */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space, sized MTTY_IO_BAR_SIZE (8 bytes) */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space for the second port */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;	/* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;	/* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	/* bytes 0x60..0x6e are the ASCII string "PCI Serial/UART" */
	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
/*
 * Emulate a guest write of @count bytes at config-space @offset.
 * Read-only fields are silently dropped; BAR0/BAR1 writes implement
 * the standard sizing protocol (all-ones write returns the size mask).
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c:  /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10:  /* BAR0 */
	case 0x14:  /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		/* single-port devices have no BAR1; it always reads 0 */
		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/* sizing probe: answer with the BAR size mask */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* preserve read-only low bits (IO-space indicator) */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18:  /* BAR2 */
	case 0x1c:  /* BAR3 */
	case 0x20:  /* BAR4 */
		/* unimplemented BARs always read as zero */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
/*
 * Emulate a guest write of the byte *@buf to UART register @offset of
 * port @index.  Called with ops_lock held; takes rxtx_lock around FIFO
 * updates and may signal the guest interrupt eventfd. @count is unused
 * (BAR registers are byte-wide).
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
			     u16 offset, char *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
					UART_IER_RDI) &&
			    (mdev_state->s[index].rxtx.count ==
					mdev_state->s[index].intr_trigger_level)) {

				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
			}
		} else {
			/* FIFO full: flag overrun, raise RLSI if enabled */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
					UART_IER_RLSI)
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/* THRI enabled while TX FIFO already empty: raise it */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
			}
			mutex_unlock(&mdev_state->rxtx_lock);
		}
		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Receive data timeout in IIR register
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}
		break;

	case UART_LCR:
		/* bit 7 toggles divisor latch access; reset divisor on set */
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		/* loopback handshakes raise modem-status intr when enabled */
		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing: LSR and MSR are read-only in this model */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
/*
 * Emulate a guest read of UART register @offset of port @index; the
 * result byte is stored in *@buf.  Called with ops_lock held; takes
 * rxtx_lock while the loop-back FIFO state is consulted.  @count is
 * unused (BAR registers are byte-wide).
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, char *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
			/*
			 * Trigger interrupt if tx buffer empty interrupt is
			 * enabled and fifo is empty
			 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
					UART_IER_THRI)
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, reads return the MSB of the divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		/* only the four low interrupt-enable bits are implemented */
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count ==
				mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
				UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
/*
 * Refresh region_info[].start for every implemented BAR by decoding the
 * base addresses currently programmed into the virtual config space.
 * A 64-bit memory BAR consumes the following dword as its high half.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			/* high dword lives in the next BAR slot */
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
			start_lo;
	}
}
/*
 * Dispatch a @count-byte access at device offset @pos to the config
 * space or BAR emulation, for both the read and write paths.  The
 * region index is encoded in the high bits of @pos (see the
 * MTTY_VFIO_PCI_OFFSET_* macros).  Returns @count on success, negative
 * on failure.  Serialized by ops_lock.
 */
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state;
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!mdev || !buf)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state) {
		pr_err("%s mdev_state not found\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mdev_state->ops_lock);

	/* split pos into region index (high bits) and region offset */
	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			__func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}
		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* lazily cache the programmed BAR bases on first access */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;

accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
/*
 * mdev create callback.  The number of emulated ports (1 or 2) comes
 * from the sysfs type kobject name, "<driver>-1" or "<driver>-2".
 * Allocates per-device state and config space, then links the device
 * onto the global list.  Returns 0, -EINVAL or -ENOMEM.
 */
int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	char name[MTTY_STRING_LEN];
	int nr_ports = 0, i;

	if (!mdev)
		return -EINVAL;

	/* match kobj->name against the supported type names */
	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			dev_driver_string(mdev_parent_dev(mdev)), i + 1);
		if (!strcmp(kobj->name, name)) {
			nr_ports = i + 1;
			break;
		}
	}

	if (!nr_ports)
		return -EINVAL;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;	/* no irq eventfd registered yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mtty_create_config_space(mdev_state);

	mutex_lock(&mdev_list_lock);
	list_add(&mdev_state->next, &mdev_devices_list);
	mutex_unlock(&mdev_list_lock);

	return 0;
}
  652. int mtty_remove(struct mdev_device *mdev)
  653. {
  654. struct mdev_state *mds, *tmp_mds;
  655. struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
  656. int ret = -EINVAL;
  657. mutex_lock(&mdev_list_lock);
  658. list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
  659. if (mdev_state == mds) {
  660. list_del(&mdev_state->next);
  661. mdev_set_drvdata(mdev, NULL);
  662. kfree(mdev_state->vconfig);
  663. kfree(mdev_state);
  664. ret = 0;
  665. break;
  666. }
  667. }
  668. mutex_unlock(&mdev_list_lock);
  669. return ret;
  670. }
  671. int mtty_reset(struct mdev_device *mdev)
  672. {
  673. struct mdev_state *mdev_state;
  674. if (!mdev)
  675. return -EINVAL;
  676. mdev_state = mdev_get_drvdata(mdev);
  677. if (!mdev_state)
  678. return -EINVAL;
  679. pr_info("%s: called\n", __func__);
  680. return 0;
  681. }
/*
 * VFIO read callback: copy @count bytes at *@ppos from the emulated
 * device to the user buffer @buf, split into naturally aligned 4-, 2-
 * or 1-byte accesses, widest first.  Returns the number of bytes read;
 * any access or copy failure collapses to -EFAULT.
 */
ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
		  loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
/*
 * VFIO write callback: copy @count bytes from the user buffer @buf into
 * the emulated device at *@ppos, split into naturally aligned 4-, 2-
 * or 1-byte accesses, widest first.  Returns the number of bytes
 * written; any access or copy failure collapses to -EFAULT.
 */
ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
		   size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

write_err:
	return -EFAULT;
}
/*
 * VFIO SET_IRQS backend: register or drop the eventfd used to signal
 * INTx or MSI interrupts to userspace.  MASK/UNMASK actions and the
 * MSIX/ERR/REQ indices are accepted but not implemented.  Serialized
 * by ops_lock.  @start and @count are unused by this sample.
 */
static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				/*
				 * NOTE(review): intx_evtfd is put but not
				 * cleared to NULL here — a later trigger
				 * could use a stale context; confirm intended.
				 */
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx signalling */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* already armed: ignore the new fd */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
  860. static int mtty_trigger_interrupt(uuid_le uuid)
  861. {
  862. int ret = -1;
  863. struct mdev_state *mdev_state;
  864. mdev_state = find_mdev_state_by_uuid(uuid);
  865. if (!mdev_state) {
  866. pr_info("%s: mdev not found\n", __func__);
  867. return -EINVAL;
  868. }
  869. if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
  870. (!mdev_state->msi_evtfd))
  871. return -EINVAL;
  872. else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
  873. (!mdev_state->intx_evtfd)) {
  874. pr_info("%s: Intr eventfd not found\n", __func__);
  875. return -EINVAL;
  876. }
  877. if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
  878. ret = eventfd_signal(mdev_state->msi_evtfd, 1);
  879. else
  880. ret = eventfd_signal(mdev_state->intx_evtfd, 1);
  881. #if defined(DEBUG_INTR)
  882. pr_info("Intx triggered\n");
  883. #endif
  884. if (ret != 1)
  885. pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
  886. return ret;
  887. }
  888. int mtty_get_region_info(struct mdev_device *mdev,
  889. struct vfio_region_info *region_info,
  890. u16 *cap_type_id, void **cap_type)
  891. {
  892. unsigned int size = 0;
  893. struct mdev_state *mdev_state;
  894. int bar_index;
  895. if (!mdev)
  896. return -EINVAL;
  897. mdev_state = mdev_get_drvdata(mdev);
  898. if (!mdev_state)
  899. return -EINVAL;
  900. mutex_lock(&mdev_state->ops_lock);
  901. bar_index = region_info->index;
  902. switch (bar_index) {
  903. case VFIO_PCI_CONFIG_REGION_INDEX:
  904. size = MTTY_CONFIG_SPACE_SIZE;
  905. break;
  906. case VFIO_PCI_BAR0_REGION_INDEX:
  907. size = MTTY_IO_BAR_SIZE;
  908. break;
  909. case VFIO_PCI_BAR1_REGION_INDEX:
  910. if (mdev_state->nr_ports == 2)
  911. size = MTTY_IO_BAR_SIZE;
  912. break;
  913. default:
  914. size = 0;
  915. break;
  916. }
  917. mdev_state->region_info[bar_index].size = size;
  918. mdev_state->region_info[bar_index].vfio_offset =
  919. MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  920. region_info->size = size;
  921. region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  922. region_info->flags = VFIO_REGION_INFO_FLAG_READ |
  923. VFIO_REGION_INFO_FLAG_WRITE;
  924. mutex_unlock(&mdev_state->ops_lock);
  925. return 0;
  926. }
  927. int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
  928. {
  929. switch (irq_info->index) {
  930. case VFIO_PCI_INTX_IRQ_INDEX:
  931. case VFIO_PCI_MSI_IRQ_INDEX:
  932. case VFIO_PCI_REQ_IRQ_INDEX:
  933. break;
  934. default:
  935. return -EINVAL;
  936. }
  937. irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
  938. irq_info->count = 1;
  939. if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
  940. irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
  941. VFIO_IRQ_INFO_AUTOMASKED);
  942. else
  943. irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
  944. return 0;
  945. }
  946. int mtty_get_device_info(struct mdev_device *mdev,
  947. struct vfio_device_info *dev_info)
  948. {
  949. dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
  950. dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
  951. dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
  952. return 0;
  953. }
  954. static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
  955. unsigned long arg)
  956. {
  957. int ret = 0;
  958. unsigned long minsz;
  959. struct mdev_state *mdev_state;
  960. if (!mdev)
  961. return -EINVAL;
  962. mdev_state = mdev_get_drvdata(mdev);
  963. if (!mdev_state)
  964. return -ENODEV;
  965. switch (cmd) {
  966. case VFIO_DEVICE_GET_INFO:
  967. {
  968. struct vfio_device_info info;
  969. minsz = offsetofend(struct vfio_device_info, num_irqs);
  970. if (copy_from_user(&info, (void __user *)arg, minsz))
  971. return -EFAULT;
  972. if (info.argsz < minsz)
  973. return -EINVAL;
  974. ret = mtty_get_device_info(mdev, &info);
  975. if (ret)
  976. return ret;
  977. memcpy(&mdev_state->dev_info, &info, sizeof(info));
  978. return copy_to_user((void __user *)arg, &info, minsz);
  979. }
  980. case VFIO_DEVICE_GET_REGION_INFO:
  981. {
  982. struct vfio_region_info info;
  983. u16 cap_type_id = 0;
  984. void *cap_type = NULL;
  985. minsz = offsetofend(struct vfio_region_info, offset);
  986. if (copy_from_user(&info, (void __user *)arg, minsz))
  987. return -EFAULT;
  988. if (info.argsz < minsz)
  989. return -EINVAL;
  990. ret = mtty_get_region_info(mdev, &info, &cap_type_id,
  991. &cap_type);
  992. if (ret)
  993. return ret;
  994. return copy_to_user((void __user *)arg, &info, minsz);
  995. }
  996. case VFIO_DEVICE_GET_IRQ_INFO:
  997. {
  998. struct vfio_irq_info info;
  999. minsz = offsetofend(struct vfio_irq_info, count);
  1000. if (copy_from_user(&info, (void __user *)arg, minsz))
  1001. return -EFAULT;
  1002. if ((info.argsz < minsz) ||
  1003. (info.index >= mdev_state->dev_info.num_irqs))
  1004. return -EINVAL;
  1005. ret = mtty_get_irq_info(mdev, &info);
  1006. if (ret)
  1007. return ret;
  1008. if (info.count == -1)
  1009. return -EINVAL;
  1010. return copy_to_user((void __user *)arg, &info, minsz);
  1011. }
  1012. case VFIO_DEVICE_SET_IRQS:
  1013. {
  1014. struct vfio_irq_set hdr;
  1015. u8 *data = NULL, *ptr = NULL;
  1016. size_t data_size = 0;
  1017. minsz = offsetofend(struct vfio_irq_set, count);
  1018. if (copy_from_user(&hdr, (void __user *)arg, minsz))
  1019. return -EFAULT;
  1020. ret = vfio_set_irqs_validate_and_prepare(&hdr,
  1021. mdev_state->dev_info.num_irqs,
  1022. VFIO_PCI_NUM_IRQS,
  1023. &data_size);
  1024. if (ret)
  1025. return ret;
  1026. if (data_size) {
  1027. ptr = data = memdup_user((void __user *)(arg + minsz),
  1028. data_size);
  1029. if (IS_ERR(data))
  1030. return PTR_ERR(data);
  1031. }
  1032. ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
  1033. hdr.count, data);
  1034. kfree(ptr);
  1035. return ret;
  1036. }
  1037. case VFIO_DEVICE_RESET:
  1038. return mtty_reset(mdev);
  1039. }
  1040. return -ENOTTY;
  1041. }
  1042. int mtty_open(struct mdev_device *mdev)
  1043. {
  1044. pr_info("%s\n", __func__);
  1045. return 0;
  1046. }
  1047. void mtty_close(struct mdev_device *mdev)
  1048. {
  1049. pr_info("%s\n", __func__);
  1050. }
  1051. static ssize_t
  1052. sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
  1053. char *buf)
  1054. {
  1055. return sprintf(buf, "This is phy device\n");
  1056. }
static DEVICE_ATTR_RO(sample_mtty_dev);

/* Attributes attached to the physical (parent) device in sysfs. */
static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

/* Exposed under <parent>/mtty_dev/ */
static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

/* Handed to the mdev core via mdev_fops.dev_attr_groups. */
const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};
  1070. static ssize_t
  1071. sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
  1072. char *buf)
  1073. {
  1074. if (mdev_from_dev(dev))
  1075. return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
  1076. return sprintf(buf, "\n");
  1077. }
static DEVICE_ATTR_RO(sample_mdev_dev);

/* Attributes attached to each mediated (child) device in sysfs. */
static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

/* Exposed under <mdev>/vendor/ */
static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

/* Handed to the mdev core via mdev_fops.mdev_attr_groups. */
const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
  1091. static ssize_t
  1092. name_show(struct kobject *kobj, struct device *dev, char *buf)
  1093. {
  1094. char name[MTTY_STRING_LEN];
  1095. int i;
  1096. const char *name_str[2] = {"Single port serial", "Dual port serial"};
  1097. for (i = 0; i < 2; i++) {
  1098. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  1099. dev_driver_string(dev), i + 1);
  1100. if (!strcmp(kobj->name, name))
  1101. return sprintf(buf, "%s\n", name_str[i]);
  1102. }
  1103. return -EINVAL;
  1104. }
  1105. MDEV_TYPE_ATTR_RO(name);
  1106. static ssize_t
  1107. available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
  1108. {
  1109. char name[MTTY_STRING_LEN];
  1110. int i;
  1111. struct mdev_state *mds;
  1112. int ports = 0, used = 0;
  1113. for (i = 0; i < 2; i++) {
  1114. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  1115. dev_driver_string(dev), i + 1);
  1116. if (!strcmp(kobj->name, name)) {
  1117. ports = i + 1;
  1118. break;
  1119. }
  1120. }
  1121. if (!ports)
  1122. return -EINVAL;
  1123. list_for_each_entry(mds, &mdev_devices_list, next)
  1124. used += mds->nr_ports;
  1125. return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
  1126. }
  1127. MDEV_TYPE_ATTR_RO(available_instances);
  1128. static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
  1129. char *buf)
  1130. {
  1131. return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
  1132. }
  1133. MDEV_TYPE_ATTR_RO(device_api);
/* Attributes shared by every supported mdev type. */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* Type "1": single-port serial device. */
static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

/* Type "2": dual-port serial device. */
static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

/* Handed to the mdev core via mdev_fops.supported_type_groups. */
struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};
/*
 * Callback table registered with the mdev core: sysfs attribute groups
 * plus the lifecycle (create/remove/open/release) and vfio access
 * (read/write/ioctl) entry points implemented above.
 */
struct mdev_parent_ops mdev_fops = {
	.owner                  = THIS_MODULE,
	.dev_attr_groups        = mtty_dev_groups,
	.mdev_attr_groups       = mdev_dev_groups,
	.supported_type_groups  = mdev_type_groups,
	.create                 = mtty_create,
	.remove			= mtty_remove,
	.open                   = mtty_open,
	.release                = mtty_close,
	.read                   = mtty_read,
	.write                  = mtty_write,
	.ioctl		        = mtty_ioctl,
};
  1166. static void mtty_device_release(struct device *dev)
  1167. {
  1168. dev_dbg(dev, "mtty: released\n");
  1169. }
  1170. static int __init mtty_dev_init(void)
  1171. {
  1172. int ret = 0;
  1173. pr_info("mtty_dev: %s\n", __func__);
  1174. memset(&mtty_dev, 0, sizeof(mtty_dev));
  1175. idr_init(&mtty_dev.vd_idr);
  1176. ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
  1177. if (ret < 0) {
  1178. pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
  1179. return ret;
  1180. }
  1181. cdev_init(&mtty_dev.vd_cdev, &vd_fops);
  1182. cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
  1183. pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
  1184. mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
  1185. if (IS_ERR(mtty_dev.vd_class)) {
  1186. pr_err("Error: failed to register mtty_dev class\n");
  1187. ret = PTR_ERR(mtty_dev.vd_class);
  1188. goto failed1;
  1189. }
  1190. mtty_dev.dev.class = mtty_dev.vd_class;
  1191. mtty_dev.dev.release = mtty_device_release;
  1192. dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
  1193. ret = device_register(&mtty_dev.dev);
  1194. if (ret)
  1195. goto failed2;
  1196. ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
  1197. if (ret)
  1198. goto failed3;
  1199. mutex_init(&mdev_list_lock);
  1200. INIT_LIST_HEAD(&mdev_devices_list);
  1201. goto all_done;
  1202. failed3:
  1203. device_unregister(&mtty_dev.dev);
  1204. failed2:
  1205. class_destroy(mtty_dev.vd_class);
  1206. failed1:
  1207. cdev_del(&mtty_dev.vd_cdev);
  1208. unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
  1209. all_done:
  1210. return ret;
  1211. }
/*
 * mtty_dev_exit() - module teardown: unregister from the mdev core,
 * then dismantle the device, char-dev and class in reverse init order.
 */
static void __exit mtty_dev_exit(void)
{
	/*
	 * NOTE(review): clearing dev.bus before unregistering appears to be
	 * a deliberate workaround so device teardown skips bus-level
	 * callbacks — confirm before reordering anything here.
	 */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
/* Module entry/exit points and metadata. */
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);