vmlogrdr.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906
  1. /*
  2. * character device driver for reading z/VM system service records
  3. *
  4. *
  5. * Copyright IBM Corp. 2004, 2009
  6. * character device driver for reading z/VM system service records,
  7. * Version 1.0
  8. * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
  9. * Stefan Weinhuber <wein@de.ibm.com>
  10. *
  11. */
  12. #define KMSG_COMPONENT "vmlogrdr"
  13. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/errno.h>
  18. #include <linux/types.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/atomic.h>
  22. #include <asm/uaccess.h>
  23. #include <asm/cpcmd.h>
  24. #include <asm/debug.h>
  25. #include <asm/ebcdic.h>
  26. #include <net/iucv/iucv.h>
  27. #include <linux/kmod.h>
  28. #include <linux/cdev.h>
  29. #include <linux/device.h>
  30. #include <linux/string.h>
  31. MODULE_AUTHOR
  32. ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
  33. " Stefan Weinhuber (wein@de.ibm.com)");
  34. MODULE_DESCRIPTION ("Character device driver for reading z/VM "
  35. "system service records.");
  36. MODULE_LICENSE("GPL");
  37. /*
  38. * The size of the buffer for iucv data transfer is one page,
  39. * but in addition to the data we read from iucv we also
  40. * place an integer and some characters into that buffer,
  41. * so the maximum size for record data is a little less then
  42. * one page.
  43. */
/* usable record payload: one page minus the leading length int and FENCE */
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
  45. /*
  46. * The elements that are concurrently accessed by bottom halves are
  47. * connection_established, iucv_path_severed, local_interrupt_buffer
  48. * and receive_ready. The first three can be protected by
  49. * priv_lock. receive_ready is atomic, so it can be incremented and
  50. * decremented without holding a lock.
  51. * The variable dev_in_use needs to be protected by the lock, since
  52. * it's a flag used by open to make sure that the device is opened only
  53. * by one user at the same time.
  54. */
struct vmlogrdr_priv_t {
	char system_service[8];		/* z/VM service name, e.g. "*LOGREC " */
	char internal_name[8];		/* device name, e.g. "logrec" */
	char recording_name[8];		/* name used in CP RECORDING commands */
	struct iucv_path *path;		/* IUCV path to the system service */
	int connection_established;	/* set by the path_complete bottom half */
	int iucv_path_severed;		/* set by the path_severed bottom half */
	/* copy of the last pending message descriptor (bottom half) */
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;		/* messages ready to be received */
	int minor_num;			/* char device minor number */
	char * buffer;			/* one page for record data */
	char * current_position;	/* read cursor within buffer */
	int remaining;			/* unread bytes left in buffer */
	ulong residual_length;		/* record bytes still held by IUCV */
	int buffer_free;		/* 1: buffer may be refilled */
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;		/* protects the fields named above */
	struct device *device;		/* device on the iucv bus */
	struct device *class_device;	/* node in the vmlogrdr class */
	int autorecording;		/* toggle recording on open/release */
	int autopurge;			/* purge queued records on toggle */
};
  77. /*
  78. * File operation structure for vmlogrdr devices
  79. */
/* forward declarations for the file operations */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
	.llseek  = no_llseek,	/* records are read strictly sequentially */
};
/* IUCV callbacks; all of them run as bottom halves */
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete   = vmlogrdr_iucv_path_complete,
	.path_severed    = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};
/* woken by the path_complete/path_severed bottom halves */
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
/* woken when a message becomes pending (or the path is severed) */
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
  102. /*
  103. * pointer to system service private structure
  104. * minor number 0 --> logrec
  105. * minor number 1 --> account
  106. * minor number 2 --> symptom
  107. */
/*
 * One statically allocated private structure per supported z/VM
 * system service; the array index equals the device minor number.
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",		/* minor 0: logrec */
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",		/* minor 1: account */
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",		/* minor 2: symptom */
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};
  137. #define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
  138. static char FENCE[] = {"EOR"};
  139. static int vmlogrdr_major = 0;
  140. static struct cdev *vmlogrdr_cdev = NULL;
  141. static int recording_class_AB;
  142. static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
  143. {
  144. struct vmlogrdr_priv_t * logptr = path->private;
  145. spin_lock(&logptr->priv_lock);
  146. logptr->connection_established = 1;
  147. spin_unlock(&logptr->priv_lock);
  148. wake_up(&conn_wait_queue);
  149. }
  150. static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
  151. {
  152. struct vmlogrdr_priv_t * logptr = path->private;
  153. u8 reason = (u8) ipuser[8];
  154. pr_err("vmlogrdr: connection severed with reason %i\n", reason);
  155. iucv_path_sever(path, NULL);
  156. kfree(path);
  157. logptr->path = NULL;
  158. spin_lock(&logptr->priv_lock);
  159. logptr->connection_established = 0;
  160. logptr->iucv_path_severed = 1;
  161. spin_unlock(&logptr->priv_lock);
  162. wake_up(&conn_wait_queue);
  163. /* just in case we're sleeping waiting for a record */
  164. wake_up_interruptible(&read_wait_queue);
  165. }
  166. static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
  167. struct iucv_message *msg)
  168. {
  169. struct vmlogrdr_priv_t * logptr = path->private;
  170. /*
  171. * This function is the bottom half so it should be quick.
  172. * Copy the external interrupt data into our local eib and increment
  173. * the usage count
  174. */
  175. spin_lock(&logptr->priv_lock);
  176. memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
  177. atomic_inc(&logptr->receive_ready);
  178. spin_unlock(&logptr->priv_lock);
  179. wake_up_interruptible(&read_wait_queue);
  180. }
  181. static int vmlogrdr_get_recording_class_AB(void)
  182. {
  183. static const char cp_command[] = "QUERY COMMAND RECORDING ";
  184. char cp_response[80];
  185. char *tail;
  186. int len,i;
  187. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  188. len = strnlen(cp_response,sizeof(cp_response));
  189. // now the parsing
  190. tail=strnchr(cp_response,len,'=');
  191. if (!tail)
  192. return 0;
  193. tail++;
  194. if (!strncmp("ANY",tail,3))
  195. return 1;
  196. if (!strncmp("NONE",tail,4))
  197. return 0;
  198. /*
  199. * expect comma separated list of classes here, if one of them
  200. * is A or B return 1 otherwise 0
  201. */
  202. for (i=tail-cp_response; i<len; i++)
  203. if ( cp_response[i]=='A' || cp_response[i]=='B' )
  204. return 1;
  205. return 0;
  206. }
/*
 * Switch recording for logptr's service ON (action == 1) or OFF
 * (action == 0) via CP RECORDING commands, optionally purging queued
 * records before switching on / after switching off.
 * Returns 0 when CP answered 'Command complete', -EIO otherwise.
 */
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{
	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording commands needs to be called with option QID
	 * for guests that have previlege classes A or B.
	 * Purging has to be done as separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather then the strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		rc = 0;
	else
		rc = -EIO;
	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}
	return rc;
}
  265. static int vmlogrdr_open (struct inode *inode, struct file *filp)
  266. {
  267. int dev_num = 0;
  268. struct vmlogrdr_priv_t * logptr = NULL;
  269. int connect_rc = 0;
  270. int ret;
  271. dev_num = iminor(inode);
  272. if (dev_num >= MAXMINOR)
  273. return -ENODEV;
  274. logptr = &sys_ser[dev_num];
  275. /*
  276. * only allow for blocking reads to be open
  277. */
  278. if (filp->f_flags & O_NONBLOCK)
  279. return -EOPNOTSUPP;
  280. /* Besure this device hasn't already been opened */
  281. spin_lock_bh(&logptr->priv_lock);
  282. if (logptr->dev_in_use) {
  283. spin_unlock_bh(&logptr->priv_lock);
  284. return -EBUSY;
  285. }
  286. logptr->dev_in_use = 1;
  287. logptr->connection_established = 0;
  288. logptr->iucv_path_severed = 0;
  289. atomic_set(&logptr->receive_ready, 0);
  290. logptr->buffer_free = 1;
  291. spin_unlock_bh(&logptr->priv_lock);
  292. /* set the file options */
  293. filp->private_data = logptr;
  294. /* start recording for this service*/
  295. if (logptr->autorecording) {
  296. ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
  297. if (ret)
  298. pr_warn("vmlogrdr: failed to start recording automatically\n");
  299. }
  300. /* create connection to the system service */
  301. logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
  302. if (!logptr->path)
  303. goto out_dev;
  304. connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
  305. logptr->system_service, NULL, NULL,
  306. logptr);
  307. if (connect_rc) {
  308. pr_err("vmlogrdr: iucv connection to %s "
  309. "failed with rc %i \n",
  310. logptr->system_service, connect_rc);
  311. goto out_path;
  312. }
  313. /* We've issued the connect and now we must wait for a
  314. * ConnectionComplete or ConnectinSevered Interrupt
  315. * before we can continue to process.
  316. */
  317. wait_event(conn_wait_queue, (logptr->connection_established)
  318. || (logptr->iucv_path_severed));
  319. if (logptr->iucv_path_severed)
  320. goto out_record;
  321. nonseekable_open(inode, filp);
  322. return 0;
  323. out_record:
  324. if (logptr->autorecording)
  325. vmlogrdr_recording(logptr,0,logptr->autopurge);
  326. out_path:
  327. kfree(logptr->path); /* kfree(NULL) is ok. */
  328. logptr->path = NULL;
  329. out_dev:
  330. logptr->dev_in_use = 0;
  331. return -EIO;
  332. }
  333. static int vmlogrdr_release (struct inode *inode, struct file *filp)
  334. {
  335. int ret;
  336. struct vmlogrdr_priv_t * logptr = filp->private_data;
  337. iucv_path_sever(logptr->path, NULL);
  338. kfree(logptr->path);
  339. logptr->path = NULL;
  340. if (logptr->autorecording) {
  341. ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
  342. if (ret)
  343. pr_warn("vmlogrdr: failed to stop recording automatically\n");
  344. }
  345. logptr->dev_in_use = 0;
  346. return 0;
  347. }
/*
 * Refill priv->buffer with the next chunk of record data from IUCV.
 * A fresh record is prefixed with an int holding its total length
 * (record + FENCE); an oversized record is fetched in pieces via
 * priv->residual_length.  Returns 0 when the buffer was refilled,
 * 1 when no message is pending, or the iucv_message_receive() rc.
 */
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged befor we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}
	return rc;
}
/*
 * Read from the currently buffered record chunk; when the buffer has
 * been fully consumed, refill it first, sleeping interruptibly until
 * a message is ready.  At most the remainder of the buffered chunk is
 * copied per call, so callers never read across a record boundary.
 */
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			/* nothing pending yet -- wait for the bottom half */
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
  440. static ssize_t vmlogrdr_autopurge_store(struct device * dev,
  441. struct device_attribute *attr,
  442. const char * buf, size_t count)
  443. {
  444. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  445. ssize_t ret = count;
  446. switch (buf[0]) {
  447. case '0':
  448. priv->autopurge=0;
  449. break;
  450. case '1':
  451. priv->autopurge=1;
  452. break;
  453. default:
  454. ret = -EINVAL;
  455. }
  456. return ret;
  457. }
  458. static ssize_t vmlogrdr_autopurge_show(struct device *dev,
  459. struct device_attribute *attr,
  460. char *buf)
  461. {
  462. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  463. return sprintf(buf, "%u\n", priv->autopurge);
  464. }
  465. static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
  466. vmlogrdr_autopurge_store);
  467. static ssize_t vmlogrdr_purge_store(struct device * dev,
  468. struct device_attribute *attr,
  469. const char * buf, size_t count)
  470. {
  471. char cp_command[80];
  472. char cp_response[80];
  473. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  474. if (buf[0] != '1')
  475. return -EINVAL;
  476. memset(cp_command, 0x00, sizeof(cp_command));
  477. memset(cp_response, 0x00, sizeof(cp_response));
  478. /*
  479. * The recording command needs to be called with option QID
  480. * for guests that have previlege classes A or B.
  481. * Other guests will not recognize the command and we have to
  482. * issue the same command without the QID parameter.
  483. */
  484. if (recording_class_AB)
  485. snprintf(cp_command, sizeof(cp_command),
  486. "RECORDING %s PURGE QID * ",
  487. priv->recording_name);
  488. else
  489. snprintf(cp_command, sizeof(cp_command),
  490. "RECORDING %s PURGE ",
  491. priv->recording_name);
  492. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  493. return count;
  494. }
  495. static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
  496. static ssize_t vmlogrdr_autorecording_store(struct device *dev,
  497. struct device_attribute *attr,
  498. const char *buf, size_t count)
  499. {
  500. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  501. ssize_t ret = count;
  502. switch (buf[0]) {
  503. case '0':
  504. priv->autorecording=0;
  505. break;
  506. case '1':
  507. priv->autorecording=1;
  508. break;
  509. default:
  510. ret = -EINVAL;
  511. }
  512. return ret;
  513. }
  514. static ssize_t vmlogrdr_autorecording_show(struct device *dev,
  515. struct device_attribute *attr,
  516. char *buf)
  517. {
  518. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  519. return sprintf(buf, "%u\n", priv->autorecording);
  520. }
  521. static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
  522. vmlogrdr_autorecording_store);
  523. static ssize_t vmlogrdr_recording_store(struct device * dev,
  524. struct device_attribute *attr,
  525. const char * buf, size_t count)
  526. {
  527. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  528. ssize_t ret;
  529. switch (buf[0]) {
  530. case '0':
  531. ret = vmlogrdr_recording(priv,0,0);
  532. break;
  533. case '1':
  534. ret = vmlogrdr_recording(priv,1,0);
  535. break;
  536. default:
  537. ret = -EINVAL;
  538. }
  539. if (ret)
  540. return ret;
  541. else
  542. return count;
  543. }
  544. static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
  545. static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
  546. char *buf)
  547. {
  548. static const char cp_command[] = "QUERY RECORDING ";
  549. int len;
  550. cpcmd(cp_command, buf, 4096, NULL);
  551. len = strlen(buf);
  552. return len;
  553. }
  554. static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
  555. NULL);
/* sysfs attributes attached to the driver object */
static struct attribute *vmlogrdr_drv_attrs[] = {
	&driver_attr_recording_status.attr,
	NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
	.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
	&vmlogrdr_drv_attr_group,
	NULL,
};

/* sysfs attributes attached to each vmlogrdr device */
static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
	&vmlogrdr_attr_group,
	NULL,
};
  581. static int vmlogrdr_pm_prepare(struct device *dev)
  582. {
  583. int rc;
  584. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  585. rc = 0;
  586. if (priv) {
  587. spin_lock_bh(&priv->priv_lock);
  588. if (priv->dev_in_use)
  589. rc = -EBUSY;
  590. spin_unlock_bh(&priv->priv_lock);
  591. }
  592. if (rc)
  593. pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
  594. dev_name(dev));
  595. return rc;
  596. }
  597. static const struct dev_pm_ops vmlogrdr_pm_ops = {
  598. .prepare = vmlogrdr_pm_prepare,
  599. };
static struct class *vmlogrdr_class;	/* backs /sys/class/vmlogrdr */

/* driver object on the iucv bus; carries the pm ops and sysfs groups */
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
	.groups = vmlogrdr_drv_attr_groups,
};
  607. static int vmlogrdr_register_driver(void)
  608. {
  609. int ret;
  610. /* Register with iucv driver */
  611. ret = iucv_register(&vmlogrdr_iucv_handler, 1);
  612. if (ret)
  613. goto out;
  614. ret = driver_register(&vmlogrdr_driver);
  615. if (ret)
  616. goto out_iucv;
  617. vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
  618. if (IS_ERR(vmlogrdr_class)) {
  619. ret = PTR_ERR(vmlogrdr_class);
  620. vmlogrdr_class = NULL;
  621. goto out_driver;
  622. }
  623. return 0;
  624. out_driver:
  625. driver_unregister(&vmlogrdr_driver);
  626. out_iucv:
  627. iucv_unregister(&vmlogrdr_iucv_handler, 1);
  628. out:
  629. return ret;
  630. }
  631. static void vmlogrdr_unregister_driver(void)
  632. {
  633. class_destroy(vmlogrdr_class);
  634. vmlogrdr_class = NULL;
  635. driver_unregister(&vmlogrdr_driver);
  636. iucv_unregister(&vmlogrdr_iucv_handler, 1);
  637. }
  638. static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
  639. {
  640. struct device *dev;
  641. int ret;
  642. dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  643. if (dev) {
  644. dev_set_name(dev, "%s", priv->internal_name);
  645. dev->bus = &iucv_bus;
  646. dev->parent = iucv_root;
  647. dev->driver = &vmlogrdr_driver;
  648. dev->groups = vmlogrdr_attr_groups;
  649. dev_set_drvdata(dev, priv);
  650. /*
  651. * The release function could be called after the
  652. * module has been unloaded. It's _only_ task is to
  653. * free the struct. Therefore, we specify kfree()
  654. * directly here. (Probably a little bit obfuscating
  655. * but legitime ...).
  656. */
  657. dev->release = (void (*)(struct device *))kfree;
  658. } else
  659. return -ENOMEM;
  660. ret = device_register(dev);
  661. if (ret) {
  662. put_device(dev);
  663. return ret;
  664. }
  665. priv->class_device = device_create(vmlogrdr_class, dev,
  666. MKDEV(vmlogrdr_major,
  667. priv->minor_num),
  668. priv, "%s", dev_name(dev));
  669. if (IS_ERR(priv->class_device)) {
  670. ret = PTR_ERR(priv->class_device);
  671. priv->class_device=NULL;
  672. device_unregister(dev);
  673. return ret;
  674. }
  675. priv->device = dev;
  676. return 0;
  677. }
  678. static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
  679. {
  680. device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
  681. if (priv->device != NULL) {
  682. device_unregister(priv->device);
  683. priv->device=NULL;
  684. }
  685. return 0;
  686. }
  687. static int vmlogrdr_register_cdev(dev_t dev)
  688. {
  689. int rc = 0;
  690. vmlogrdr_cdev = cdev_alloc();
  691. if (!vmlogrdr_cdev) {
  692. return -ENOMEM;
  693. }
  694. vmlogrdr_cdev->owner = THIS_MODULE;
  695. vmlogrdr_cdev->ops = &vmlogrdr_fops;
  696. vmlogrdr_cdev->dev = dev;
  697. rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
  698. if (!rc)
  699. return 0;
  700. // cleanup: cdev is not fully registered, no cdev_del here!
  701. kobject_put(&vmlogrdr_cdev->kobj);
  702. vmlogrdr_cdev=NULL;
  703. return rc;
  704. }
/*
 * Tear down everything vmlogrdr_init() set up.  Also serves as the
 * init failure path, so every step tolerates the corresponding setup
 * not having happened (NULL cdev/device, NULL page, major 0).
 */
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		/* free_page() of 0 (never allocated) is harmless */
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
/*
 * Module init: requires running under z/VM; allocates the char major,
 * registers driver and class, then one device plus one page buffer
 * per system service, and finally the cdev.  Any failure unwinds via
 * vmlogrdr_cleanup().
 */
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	/* cache whether CP RECORDING needs the QID option (class A/B) */
	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		/* NOTE(review): GFP_DMA presumably needed for IUCV buffer
		 * addressing limits -- confirm against IUCV requirements */
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}
  760. static void __exit vmlogrdr_exit(void)
  761. {
  762. vmlogrdr_cleanup();
  763. return;
  764. }
  765. module_init(vmlogrdr_init);
  766. module_exit(vmlogrdr_exit);