vmlogrdr.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912
  1. /*
  2. * drivers/s390/char/vmlogrdr.c
  3. * character device driver for reading z/VM system service records
  4. *
  5. *
  6. * Copyright IBM Corp. 2004, 2009
  7. * character device driver for reading z/VM system service records,
  8. * Version 1.0
  9. * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
  10. * Stefan Weinhuber <wein@de.ibm.com>
  11. *
  12. */
  13. #define KMSG_COMPONENT "vmlogrdr"
  14. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  15. #include <linux/module.h>
  16. #include <linux/init.h>
  17. #include <linux/slab.h>
  18. #include <linux/errno.h>
  19. #include <linux/types.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/spinlock.h>
  22. #include <asm/atomic.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/cpcmd.h>
  25. #include <asm/debug.h>
  26. #include <asm/ebcdic.h>
  27. #include <net/iucv/iucv.h>
  28. #include <linux/kmod.h>
  29. #include <linux/cdev.h>
  30. #include <linux/device.h>
  31. #include <linux/smp_lock.h>
  32. #include <linux/string.h>
  33. MODULE_AUTHOR
  34. ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
  35. " Stefan Weinhuber (wein@de.ibm.com)");
  36. MODULE_DESCRIPTION ("Character device driver for reading z/VM "
  37. "system service records.");
  38. MODULE_LICENSE("GPL");
  39. /*
  40. * The size of the buffer for iucv data transfer is one page,
  41. * but in addition to the data we read from iucv we also
  42. * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
  44. * one page.
  45. */
  46. #define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
  47. /*
  48. * The elements that are concurrently accessed by bottom halves are
  49. * connection_established, iucv_path_severed, local_interrupt_buffer
  50. * and receive_ready. The first three can be protected by
  51. * priv_lock. receive_ready is atomic, so it can be incremented and
  52. * decremented without holding a lock.
  53. * The variable dev_in_use needs to be protected by the lock, since
  54. * it's a flag used by open to make sure that the device is opened only
  55. * by one user at the same time.
  56. */
/* Per-system-service state; one instance per minor (see sys_ser[] below). */
struct vmlogrdr_priv_t {
	char system_service[8];		/* z/VM service name, e.g. "*LOGREC " */
	char internal_name[8];		/* device name used in sysfs / dev_set_name */
	char recording_name[8];		/* name used in CP RECORDING commands */
	struct iucv_path *path;		/* IUCV path to the service, NULL when down */
	int connection_established;	/* set by path_complete callback */
	int iucv_path_severed;		/* set by path_severed callback */
	struct iucv_message local_interrupt_buffer;	/* last pending message */
	atomic_t receive_ready;		/* # of messages announced but not received */
	int minor_num;			/* minor number of this device */
	char * buffer;			/* one page, filled by vmlogrdr_receive_data */
	char * current_position;	/* read cursor inside buffer */
	int remaining;			/* bytes left to copy to user space */
	ulong residual_length;		/* bytes of current record still held by iucv */
	int buffer_free;		/* 1: buffer may be refilled by receive */
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;		/* protects the fields listed above */
	struct device *device;		/* iucv bus device */
	struct device *class_device;	/* vmlogrdr class device */
	int autorecording;		/* start/stop recording on open/release */
	int autopurge;			/* purge queued records with recording cmds */
};
  79. /*
  80. * File operation structure for vmlogrdr devices
  81. */
/* forward declarations for the file operations */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

/*
 * File operations for the vmlogrdr character devices.  Reads are always
 * blocking (open() rejects O_NONBLOCK) and seeking is disabled.
 */
static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
	.llseek  = no_llseek,
};
/* IUCV callbacks; these run in the IUCV bottom half (see priv_lock note above) */
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};
/* open() sleeps here until the IUCV path is complete or severed */
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
/* read() sleeps here until a record is pending (or the path is severed) */
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};
/* number of entries in sys_ser[] == number of minor devices */
#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

/* end-of-record marker appended behind every fully received record */
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;		/* dynamically allocated char major */
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;		/* guest may use the QID option */
  144. static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
  145. {
  146. struct vmlogrdr_priv_t * logptr = path->private;
  147. spin_lock(&logptr->priv_lock);
  148. logptr->connection_established = 1;
  149. spin_unlock(&logptr->priv_lock);
  150. wake_up(&conn_wait_queue);
  151. }
/*
 * IUCV callback: the path to the system service was severed.
 * Free the path, record the new state and wake both waiters: open() may be
 * waiting for the connection outcome, read() may be waiting for a record.
 */
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;
	/* byte 8 of the user data carries the sever reason code */
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}
/*
 * IUCV callback: a message (record) is pending for us.
 * Stash the message descriptor and wake any sleeping reader; the actual
 * data transfer happens later in vmlogrdr_receive_data().
 */
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}
/*
 * Ask CP which privilege classes may issue the RECORDING command.
 * Returns 1 if the class list behind '=' in the response is "ANY" or
 * contains 'A' or 'B' (then the QID option must be used), otherwise 0.
 */
static int vmlogrdr_get_recording_class_AB(void)
{
	static const char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len,i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response,sizeof(cp_response));
	/* now the parsing: the class list follows the '=' sign */
	tail=strnchr(cp_response,len,'=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY",tail,3))
		return 1;
	if (!strncmp("NONE",tail,4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i=tail-cp_response; i<len; i++)
		if ( cp_response[i]=='A' || cp_response[i]=='B' )
			return 1;
	return 0;
}
/*
 * Switch CP recording for this service on (action == 1) or off (action == 0),
 * optionally purging queued records.  Returns 0 on success, -EIO when CP
 * does not answer with 'Command complete'.
 */
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{
	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording commands need to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than the strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		rc = 0;
	else
		rc = -EIO;

	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	return rc;
}
  267. static int vmlogrdr_open (struct inode *inode, struct file *filp)
  268. {
  269. int dev_num = 0;
  270. struct vmlogrdr_priv_t * logptr = NULL;
  271. int connect_rc = 0;
  272. int ret;
  273. dev_num = iminor(inode);
  274. if (dev_num > MAXMINOR)
  275. return -ENODEV;
  276. logptr = &sys_ser[dev_num];
  277. /*
  278. * only allow for blocking reads to be open
  279. */
  280. if (filp->f_flags & O_NONBLOCK)
  281. return -ENOSYS;
  282. /* Besure this device hasn't already been opened */
  283. spin_lock_bh(&logptr->priv_lock);
  284. if (logptr->dev_in_use) {
  285. spin_unlock_bh(&logptr->priv_lock);
  286. return -EBUSY;
  287. }
  288. logptr->dev_in_use = 1;
  289. logptr->connection_established = 0;
  290. logptr->iucv_path_severed = 0;
  291. atomic_set(&logptr->receive_ready, 0);
  292. logptr->buffer_free = 1;
  293. spin_unlock_bh(&logptr->priv_lock);
  294. /* set the file options */
  295. filp->private_data = logptr;
  296. filp->f_op = &vmlogrdr_fops;
  297. /* start recording for this service*/
  298. if (logptr->autorecording) {
  299. ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
  300. if (ret)
  301. pr_warning("vmlogrdr: failed to start "
  302. "recording automatically\n");
  303. }
  304. /* create connection to the system service */
  305. logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
  306. if (!logptr->path)
  307. goto out_dev;
  308. connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
  309. logptr->system_service, NULL, NULL,
  310. logptr);
  311. if (connect_rc) {
  312. pr_err("vmlogrdr: iucv connection to %s "
  313. "failed with rc %i \n",
  314. logptr->system_service, connect_rc);
  315. goto out_path;
  316. }
  317. /* We've issued the connect and now we must wait for a
  318. * ConnectionComplete or ConnectinSevered Interrupt
  319. * before we can continue to process.
  320. */
  321. wait_event(conn_wait_queue, (logptr->connection_established)
  322. || (logptr->iucv_path_severed));
  323. if (logptr->iucv_path_severed)
  324. goto out_record;
  325. nonseekable_open(inode, filp);
  326. return 0;
  327. out_record:
  328. if (logptr->autorecording)
  329. vmlogrdr_recording(logptr,0,logptr->autopurge);
  330. out_path:
  331. kfree(logptr->path); /* kfree(NULL) is ok. */
  332. logptr->path = NULL;
  333. out_dev:
  334. logptr->dev_in_use = 0;
  335. return -EIO;
  336. }
/*
 * Close a vmlogrdr device: sever and free the IUCV path, optionally stop
 * recording and give up the exclusive-access flag.  Always returns 0.
 */
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;
	struct vmlogrdr_priv_t * logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to stop "
				   "recording automatically\n");
	}
	/* release the device for the next opener */
	logptr->dev_in_use = 0;
	return 0;
}
/*
 * Pull the next chunk of record data from IUCV into priv->buffer.
 * A fresh record is prefixed with an int holding its total length
 * (+ sizeof(FENCE)); a record larger than the buffer is fetched in
 * several calls via priv->residual_length.  Once a record is complete
 * the FENCE marker is appended.
 * Returns 0 when data was placed into the buffer, non-zero otherwise
 * (1: nothing pending, or an iucv_message_receive error code).
 */
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}
	return rc;
}
/*
 * Read from a vmlogrdr device.  When the buffer is exhausted, refill it
 * via vmlogrdr_receive_data(), sleeping interruptibly until a record is
 * announced.  At most the remainder of the current record is copied per
 * call.  Returns the number of bytes copied, -EFAULT on copy failure or
 * -ERESTARTSYS when the wait is interrupted.
 */
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
  445. static ssize_t vmlogrdr_autopurge_store(struct device * dev,
  446. struct device_attribute *attr,
  447. const char * buf, size_t count)
  448. {
  449. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  450. ssize_t ret = count;
  451. switch (buf[0]) {
  452. case '0':
  453. priv->autopurge=0;
  454. break;
  455. case '1':
  456. priv->autopurge=1;
  457. break;
  458. default:
  459. ret = -EINVAL;
  460. }
  461. return ret;
  462. }
/* sysfs show for 'autopurge': prints the flag as 0 or 1 */
static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}

static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);
/*
 * sysfs store for 'purge': writing '1' issues a one-shot CP
 * RECORDING ... PURGE for this service; any other input is rejected.
 */
static ssize_t vmlogrdr_purge_store(struct device * dev,
				    struct device_attribute *attr,
				    const char * buf, size_t count)
{
	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */
	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}

static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
  501. static ssize_t vmlogrdr_autorecording_store(struct device *dev,
  502. struct device_attribute *attr,
  503. const char *buf, size_t count)
  504. {
  505. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  506. ssize_t ret = count;
  507. switch (buf[0]) {
  508. case '0':
  509. priv->autorecording=0;
  510. break;
  511. case '1':
  512. priv->autorecording=1;
  513. break;
  514. default:
  515. ret = -EINVAL;
  516. }
  517. return ret;
  518. }
/* sysfs show for 'autorecording': prints the flag as 0 or 1 */
static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}

static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);
  528. static ssize_t vmlogrdr_recording_store(struct device * dev,
  529. struct device_attribute *attr,
  530. const char * buf, size_t count)
  531. {
  532. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  533. ssize_t ret;
  534. switch (buf[0]) {
  535. case '0':
  536. ret = vmlogrdr_recording(priv,0,0);
  537. break;
  538. case '1':
  539. ret = vmlogrdr_recording(priv,1,0);
  540. break;
  541. default:
  542. ret = -EINVAL;
  543. }
  544. if (ret)
  545. return ret;
  546. else
  547. return count;
  548. }
  549. static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
/*
 * Driver-level sysfs attribute: return the raw CP 'QUERY RECORDING'
 * response.  NOTE(review): the hard-coded 4096 presumably matches the
 * PAGE_SIZE sysfs buffer on this platform -- confirm before changing.
 */
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
					      char *buf)
{
	static const char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}

static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
		   NULL);
/* attributes created on every per-service device (see vmlogrdr_attr_group) */
static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
  568. static int vmlogrdr_pm_prepare(struct device *dev)
  569. {
  570. int rc;
  571. struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
  572. rc = 0;
  573. if (priv) {
  574. spin_lock_bh(&priv->priv_lock);
  575. if (priv->dev_in_use)
  576. rc = -EBUSY;
  577. spin_unlock_bh(&priv->priv_lock);
  578. }
  579. if (rc)
  580. pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
  581. dev_name(dev));
  582. return rc;
  583. }
static const struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,	/* veto suspend while device is open */
};

static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};

/* device class backing /dev/<service> nodes, created in register_driver */
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
};
/*
 * Register the driver-level objects: the IUCV handler, the device driver,
 * its recording_status attribute and the device class.  On any failure
 * everything registered so far is unwound via the goto chain.
 */
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	ret = driver_create_file(&vmlogrdr_driver,
				 &driver_attr_recording_status);
	if (ret)
		goto out_driver;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_attr;
	}
	return 0;

out_attr:
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}
/* Tear down in the reverse order of vmlogrdr_register_driver(). */
static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
  634. static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
  635. {
  636. struct device *dev;
  637. int ret;
  638. dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  639. if (dev) {
  640. dev_set_name(dev, priv->internal_name);
  641. dev->bus = &iucv_bus;
  642. dev->parent = iucv_root;
  643. dev->driver = &vmlogrdr_driver;
  644. dev_set_drvdata(dev, priv);
  645. /*
  646. * The release function could be called after the
  647. * module has been unloaded. It's _only_ task is to
  648. * free the struct. Therefore, we specify kfree()
  649. * directly here. (Probably a little bit obfuscating
  650. * but legitime ...).
  651. */
  652. dev->release = (void (*)(struct device *))kfree;
  653. } else
  654. return -ENOMEM;
  655. ret = device_register(dev);
  656. if (ret) {
  657. put_device(dev);
  658. return ret;
  659. }
  660. ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
  661. if (ret) {
  662. device_unregister(dev);
  663. return ret;
  664. }
  665. priv->class_device = device_create(vmlogrdr_class, dev,
  666. MKDEV(vmlogrdr_major,
  667. priv->minor_num),
  668. priv, "%s", dev_name(dev));
  669. if (IS_ERR(priv->class_device)) {
  670. ret = PTR_ERR(priv->class_device);
  671. priv->class_device=NULL;
  672. sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
  673. device_unregister(dev);
  674. return ret;
  675. }
  676. priv->device = dev;
  677. return 0;
  678. }
  679. static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
  680. {
  681. device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
  682. if (priv->device != NULL) {
  683. sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
  684. device_unregister(priv->device);
  685. priv->device=NULL;
  686. }
  687. return 0;
  688. }
/*
 * Allocate and add the character device covering all MAXMINOR minors of
 * the given dev_t.  Returns 0 on success or a negative errno.
 */
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (!rc)
		return 0;

	// cleanup: cdev is not fully registered, no cdev_del here!
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}
/*
 * Common teardown for module exit and failed init: remove the cdev, the
 * per-service devices and buffers, the driver-level objects and finally
 * the char device region.  Safe to call with only part of the setup done.
 */
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		/* free_page(0) is a no-op for never-allocated buffers */
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
/*
 * Module init: only runs under z/VM.  Allocates the char device region,
 * registers the driver objects, sets up one buffer page and device per
 * system service and finally adds the cdev.  Any failure funnels into
 * vmlogrdr_cleanup(), which tolerates partial setup.
 */
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}
  762. static void __exit vmlogrdr_exit(void)
  763. {
  764. vmlogrdr_cleanup();
  765. return;
  766. }
  767. module_init(vmlogrdr_init);
  768. module_exit(vmlogrdr_exit);