/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"

/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
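
/*
 * Editor's note -- framing sketch derived from the definitions above (not
 * part of any protocol documentation): every IUCV transfer carries a 4-byte
 * iucv_tty_msg header (version, type, datalen) followed by datalen payload
 * bytes, so the total IUCV message length is MSG_SIZE(datalen).  For
 * example, sending five bytes of terminal data results in an IUCV message
 * of MSG_SIZE(5) = 9 bytes.
 */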

enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);

/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending		= hvc_iucv_path_pending,
	.path_severed		= hvc_iucv_path_severed,
	.message_complete	= hvc_iucv_msg_complete,
	.message_pending	= hvc_iucv_msg_pending,
};

/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * The internal data buffer is always allocated with GFP_DMA which is
 * required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and the
 * members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type    = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful       */
			break;
		case 2: /* No message found */
		case 9: /* Message purged   */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns -EIO if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}
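
/*
 * Editor's note -- output data path as implemented in this file (summary
 * only, no new behavior): hvc_iucv_put_chars() copies terminal output into
 * priv->sndbuf via hvc_iucv_queue(), which schedules sndbuf_work with a
 * QUEUE_SNDBUF_DELAY (HZ/25) delay so that small writes are batched.  The
 * delayed work runs hvc_iucv_sndbuf_work() -> hvc_iucv_send(), which wraps
 * the buffer into a struct iucv_tty_msg, queues it on tty_outqueue and
 * sends it with __iucv_message_send().  Completion is reported through
 * hvc_iucv_msg_complete(), which removes the entry from tty_outqueue and
 * wakes up waiters on sndbuf_waitq (see flush_sndbuf_sync()).
 */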

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up an HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *			=> no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *			=> no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPENED) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path	*path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
			return 0;
	return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found
 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}

/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}

/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance to free.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location (8 bytes) to store the parsed, blank-padded user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);

	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

static struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			  "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
		       "kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* register IUCV HVC device driver */
	rc = driver_register(&hvc_iucv_driver);
	if (rc)
		goto out_error;

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
			       "reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
			       "z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
			       "z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
			       "failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
		       rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	if (hvc_iucv_filter)
		kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return strict_strtoul(val, 10, &hvc_iucv_devices);
}

device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
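
/*
 * Editor's note -- usage sketch (the z/VM user IDs below are hypothetical):
 * the number of terminals and the z/VM user ID filter are given on the
 * kernel command line, for example:
 *
 *	hvc_iucv=4 hvc_iucv_allow=LNXADMIN,MAINT
 *
 * This provides four HVC terminals whose IUCV service names are
 * "lnxhvc0".."lnxhvc3" (blank padded, EBCDIC) and accepts connection
 * requests from the listed z/VM guests only.  Because hvc_iucv_allow is
 * declared with core_param() and mode 0640, the filter can also be read
 * and updated at runtime through its sysfs parameter attribute.
 */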