vmlogrdr.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875
  1. /*
  2. * drivers/s390/char/vmlogrdr.c
  3. * character device driver for reading z/VM system service records
  4. *
  5. *
  6. * Copyright 2004 IBM Corporation
  7. * character device driver for reading z/VM system service records,
  8. * Version 1.0
  9. * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
  10. * Stefan Weinhuber <wein@de.ibm.com>
  11. *
  12. */
  13. #include <linux/module.h>
  14. #include <linux/init.h>
  15. #include <linux/errno.h>
  16. #include <linux/types.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/spinlock.h>
  19. #include <asm/atomic.h>
  20. #include <asm/uaccess.h>
  21. #include <asm/cpcmd.h>
  22. #include <asm/debug.h>
  23. #include <asm/ebcdic.h>
  24. #include <net/iucv/iucv.h>
  25. #include <linux/kmod.h>
  26. #include <linux/cdev.h>
  27. #include <linux/device.h>
  28. #include <linux/smp_lock.h>
  29. #include <linux/string.h>
MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 " Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");

/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less then
 * one page.
 */
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock. receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];		/* z/VM service, e.g. "*LOGREC " */
	char internal_name[8];		/* device name ("logrec", ...) */
	char recording_name[8];		/* name used in CP RECORDING cmds */
	struct iucv_path *path;		/* IUCV path to the service */
	int connection_established;	/* set by path_complete callback */
	int iucv_path_severed;		/* set by path_severed callback */
	/* copy of the last pending-message descriptor from the callback */
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;		/* # of messages ready to receive */
	int minor_num;			/* char device minor number */
	char * buffer;			/* one page for record data */
	char * current_position;	/* read cursor within buffer */
	int remaining;			/* bytes left to copy to user */
	int buffer_free;		/* 1: buffer may be refilled */
	/* record bytes not yet received (record larger than buffer) */
	ulong residual_length;
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;		/* protects the fields noted above */
	struct device *device;		/* device on the iucv bus */
	struct device *class_device;	/* char device node in the class */
	int autorecording;		/* start/stop recording on open/close */
	int autopurge;			/* purge queued records on open/close */
};
/*
 * File operation structure for vmlogrdr devices.
 * Only blocking reads are supported (open rejects O_NONBLOCK).
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
};
/* IUCV callbacks; they run in bottom-half context (see the comments in
 * the callback bodies) and communicate with process context through the
 * wait queues below. */
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};

/* woken when a connection is established or severed */
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
/* woken when a record is pending, or when the path is severed */
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

/* number of supported minors (== number of system services above) */
#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

/* end-of-record marker appended after each completely received record */
static char FENCE[] = {"EOR"};
/* dynamically allocated char major; 0 until the region is allocated */
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
/* nonzero if the guest has privilege class A or B (changes RECORDING
 * command syntax, see vmlogrdr_recording()) */
static int recording_class_AB;
  140. static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
  141. {
  142. struct vmlogrdr_priv_t * logptr = path->private;
  143. spin_lock(&logptr->priv_lock);
  144. logptr->connection_established = 1;
  145. spin_unlock(&logptr->priv_lock);
  146. wake_up(&conn_wait_queue);
  147. }
/*
 * IUCV callback: the peer severed the connection. The sever reason is
 * in byte 8 of the user data. Tears the path down, marks the device as
 * disconnected and wakes both an opener and a blocked reader.
 */
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;
	u8 reason = (u8) ipuser[8];

	printk (KERN_ERR "vmlogrdr: connection severed with"
		" reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	/* NOTE(review): logptr->path is cleared without holding
	 * priv_lock, unlike the flags below — confirm this is safe
	 * against a concurrent release(). */
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}
/*
 * IUCV callback: a record is ready to be received. Saves the message
 * descriptor for the later iucv_message_receive() call and wakes a
 * reader blocked in vmlogrdr_read().
 */
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}
  180. static int vmlogrdr_get_recording_class_AB(void)
  181. {
  182. char cp_command[]="QUERY COMMAND RECORDING ";
  183. char cp_response[80];
  184. char *tail;
  185. int len,i;
  186. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  187. len = strnlen(cp_response,sizeof(cp_response));
  188. // now the parsing
  189. tail=strnchr(cp_response,len,'=');
  190. if (!tail)
  191. return 0;
  192. tail++;
  193. if (!strncmp("ANY",tail,3))
  194. return 1;
  195. if (!strncmp("NONE",tail,4))
  196. return 0;
  197. /*
  198. * expect comma separated list of classes here, if one of them
  199. * is A or B return 1 otherwise 0
  200. */
  201. for (i=tail-cp_response; i<len; i++)
  202. if ( cp_response[i]=='A' || cp_response[i]=='B' )
  203. return 1;
  204. return 0;
  205. }
  206. static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
  207. int action, int purge)
  208. {
  209. char cp_command[80];
  210. char cp_response[160];
  211. char *onoff, *qid_string;
  212. memset(cp_command, 0x00, sizeof(cp_command));
  213. memset(cp_response, 0x00, sizeof(cp_response));
  214. onoff = ((action == 1) ? "ON" : "OFF");
  215. qid_string = ((recording_class_AB == 1) ? " QID * " : "");
  216. /*
  217. * The recording commands needs to be called with option QID
  218. * for guests that have previlege classes A or B.
  219. * Purging has to be done as separate step, because recording
  220. * can't be switched on as long as records are on the queue.
  221. * Doing both at the same time doesn't work.
  222. */
  223. if (purge) {
  224. snprintf(cp_command, sizeof(cp_command),
  225. "RECORDING %s PURGE %s",
  226. logptr->recording_name,
  227. qid_string);
  228. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  229. }
  230. memset(cp_command, 0x00, sizeof(cp_command));
  231. memset(cp_response, 0x00, sizeof(cp_response));
  232. snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
  233. logptr->recording_name,
  234. onoff,
  235. qid_string);
  236. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  237. /* The recording command will usually answer with 'Command complete'
  238. * on success, but when the specific service was never connected
  239. * before then there might be an additional informational message
  240. * 'HCPCRC8072I Recording entry not found' before the
  241. * 'Command complete'. So I use strstr rather then the strncmp.
  242. */
  243. if (strstr(cp_response,"Command complete"))
  244. return 0;
  245. else
  246. return -EIO;
  247. }
  248. static int vmlogrdr_open (struct inode *inode, struct file *filp)
  249. {
  250. int dev_num = 0;
  251. struct vmlogrdr_priv_t * logptr = NULL;
  252. int connect_rc = 0;
  253. int ret;
  254. dev_num = iminor(inode);
  255. if (dev_num > MAXMINOR)
  256. return -ENODEV;
  257. logptr = &sys_ser[dev_num];
  258. /*
  259. * only allow for blocking reads to be open
  260. */
  261. if (filp->f_flags & O_NONBLOCK)
  262. return -ENOSYS;
  263. /* Besure this device hasn't already been opened */
  264. lock_kernel();
  265. spin_lock_bh(&logptr->priv_lock);
  266. if (logptr->dev_in_use) {
  267. spin_unlock_bh(&logptr->priv_lock);
  268. unlock_kernel();
  269. return -EBUSY;
  270. }
  271. logptr->dev_in_use = 1;
  272. logptr->connection_established = 0;
  273. logptr->iucv_path_severed = 0;
  274. atomic_set(&logptr->receive_ready, 0);
  275. logptr->buffer_free = 1;
  276. spin_unlock_bh(&logptr->priv_lock);
  277. /* set the file options */
  278. filp->private_data = logptr;
  279. filp->f_op = &vmlogrdr_fops;
  280. /* start recording for this service*/
  281. if (logptr->autorecording) {
  282. ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
  283. if (ret)
  284. printk (KERN_WARNING "vmlogrdr: failed to start "
  285. "recording automatically\n");
  286. }
  287. /* create connection to the system service */
  288. logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
  289. if (!logptr->path)
  290. goto out_dev;
  291. connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
  292. logptr->system_service, NULL, NULL,
  293. logptr);
  294. if (connect_rc) {
  295. printk (KERN_ERR "vmlogrdr: iucv connection to %s "
  296. "failed with rc %i \n", logptr->system_service,
  297. connect_rc);
  298. goto out_path;
  299. }
  300. /* We've issued the connect and now we must wait for a
  301. * ConnectionComplete or ConnectinSevered Interrupt
  302. * before we can continue to process.
  303. */
  304. wait_event(conn_wait_queue, (logptr->connection_established)
  305. || (logptr->iucv_path_severed));
  306. if (logptr->iucv_path_severed)
  307. goto out_record;
  308. ret = nonseekable_open(inode, filp);
  309. unlock_kernel();
  310. return ret;
  311. out_record:
  312. if (logptr->autorecording)
  313. vmlogrdr_recording(logptr,0,logptr->autopurge);
  314. out_path:
  315. kfree(logptr->path); /* kfree(NULL) is ok. */
  316. logptr->path = NULL;
  317. out_dev:
  318. logptr->dev_in_use = 0;
  319. unlock_kernel();
  320. return -EIO;
  321. }
/*
 * Close the device: sever and free the IUCV path, optionally stop CP
 * recording (and purge) again, and mark the device as free for the
 * next opener. Always returns 0.
 */
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;
	struct vmlogrdr_priv_t * logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);	/* kfree(NULL) is ok */
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
		if (ret)
			printk (KERN_WARNING "vmlogrdr: failed to stop "
				"recording automatically\n");
	}
	/* NOTE(review): cleared without priv_lock, unlike in open() —
	 * presumably safe because no reader can race a release; confirm. */
	logptr->dev_in_use = 0;
	return 0;
}
/*
 * Pull one pending record (or the next chunk of a record larger than
 * the buffer) from IUCV into priv->buffer.
 *
 * On success (return 0) the buffer holds, for a new record, a leading
 * int with the total length (record + FENCE) followed by record data;
 * once the whole record has arrived the FENCE marker is appended.
 * Returns nonzero when no data was ready or the receive failed.
 */
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger then our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		/* updates priv->residual_length to the number of record
		 * bytes that did not fit into this receive */
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger then
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged befor we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}
	return rc;
}
/*
 * Read up to 'count' bytes of the current record into the user buffer.
 * Sleeps (interruptibly) until data is available; blocking is always
 * permitted because open() rejects O_NONBLOCK. Never crosses a record
 * boundary in one call.
 */
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	/* refill the buffer until a record (or a chunk of one) is in it */
	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;	/* interrupted by a signal */
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
  430. static ssize_t vmlogrdr_autopurge_store(struct device * dev,
  431. struct device_attribute *attr,
  432. const char * buf, size_t count)
  433. {
  434. struct vmlogrdr_priv_t *priv = dev->driver_data;
  435. ssize_t ret = count;
  436. switch (buf[0]) {
  437. case '0':
  438. priv->autopurge=0;
  439. break;
  440. case '1':
  441. priv->autopurge=1;
  442. break;
  443. default:
  444. ret = -EINVAL;
  445. }
  446. return ret;
  447. }
  448. static ssize_t vmlogrdr_autopurge_show(struct device *dev,
  449. struct device_attribute *attr,
  450. char *buf)
  451. {
  452. struct vmlogrdr_priv_t *priv = dev->driver_data;
  453. return sprintf(buf, "%u\n", priv->autopurge);
  454. }
  455. static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
  456. vmlogrdr_autopurge_store);
  457. static ssize_t vmlogrdr_purge_store(struct device * dev,
  458. struct device_attribute *attr,
  459. const char * buf, size_t count)
  460. {
  461. char cp_command[80];
  462. char cp_response[80];
  463. struct vmlogrdr_priv_t *priv = dev->driver_data;
  464. if (buf[0] != '1')
  465. return -EINVAL;
  466. memset(cp_command, 0x00, sizeof(cp_command));
  467. memset(cp_response, 0x00, sizeof(cp_response));
  468. /*
  469. * The recording command needs to be called with option QID
  470. * for guests that have previlege classes A or B.
  471. * Other guests will not recognize the command and we have to
  472. * issue the same command without the QID parameter.
  473. */
  474. if (recording_class_AB)
  475. snprintf(cp_command, sizeof(cp_command),
  476. "RECORDING %s PURGE QID * ",
  477. priv->recording_name);
  478. else
  479. snprintf(cp_command, sizeof(cp_command),
  480. "RECORDING %s PURGE ",
  481. priv->recording_name);
  482. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  483. return count;
  484. }
  485. static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
  486. static ssize_t vmlogrdr_autorecording_store(struct device *dev,
  487. struct device_attribute *attr,
  488. const char *buf, size_t count)
  489. {
  490. struct vmlogrdr_priv_t *priv = dev->driver_data;
  491. ssize_t ret = count;
  492. switch (buf[0]) {
  493. case '0':
  494. priv->autorecording=0;
  495. break;
  496. case '1':
  497. priv->autorecording=1;
  498. break;
  499. default:
  500. ret = -EINVAL;
  501. }
  502. return ret;
  503. }
  504. static ssize_t vmlogrdr_autorecording_show(struct device *dev,
  505. struct device_attribute *attr,
  506. char *buf)
  507. {
  508. struct vmlogrdr_priv_t *priv = dev->driver_data;
  509. return sprintf(buf, "%u\n", priv->autorecording);
  510. }
  511. static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
  512. vmlogrdr_autorecording_store);
  513. static ssize_t vmlogrdr_recording_store(struct device * dev,
  514. struct device_attribute *attr,
  515. const char * buf, size_t count)
  516. {
  517. struct vmlogrdr_priv_t *priv = dev->driver_data;
  518. ssize_t ret;
  519. switch (buf[0]) {
  520. case '0':
  521. ret = vmlogrdr_recording(priv,0,0);
  522. break;
  523. case '1':
  524. ret = vmlogrdr_recording(priv,1,0);
  525. break;
  526. default:
  527. ret = -EINVAL;
  528. }
  529. if (ret)
  530. return ret;
  531. else
  532. return count;
  533. }
  534. static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
/*
 * Driver attribute: show the raw output of 'QUERY RECORDING'.
 * NOTE(review): cpcmd() is told the buffer is 4096 bytes while sysfs
 * provides one page — assumes PAGE_SIZE >= 4096; confirm for this
 * platform.
 */
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
					      char *buf)
{
	char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
		   NULL);
/* per-device sysfs attributes, created in vmlogrdr_register_device() */
static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};

static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};

/* device class for the /dev nodes, created in register_driver() */
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
};
/*
 * Register the IUCV handler, the driver on the iucv bus, the
 * recording_status driver attribute and the device class.
 * On any failure, everything registered so far is unwound via the
 * goto chain and the error is returned.
 */
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	ret = driver_create_file(&vmlogrdr_driver,
				 &driver_attr_recording_status);
	if (ret)
		goto out_driver;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_attr;
	}
	return 0;

out_attr:
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}
/*
 * Undo vmlogrdr_register_driver(), in reverse registration order.
 */
static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
  599. static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
  600. {
  601. struct device *dev;
  602. int ret;
  603. dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  604. if (dev) {
  605. dev_set_name(dev, priv->internal_name);
  606. dev->bus = &iucv_bus;
  607. dev->parent = iucv_root;
  608. dev->driver = &vmlogrdr_driver;
  609. /*
  610. * The release function could be called after the
  611. * module has been unloaded. It's _only_ task is to
  612. * free the struct. Therefore, we specify kfree()
  613. * directly here. (Probably a little bit obfuscating
  614. * but legitime ...).
  615. */
  616. dev->release = (void (*)(struct device *))kfree;
  617. } else
  618. return -ENOMEM;
  619. ret = device_register(dev);
  620. if (ret)
  621. return ret;
  622. ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
  623. if (ret) {
  624. device_unregister(dev);
  625. return ret;
  626. }
  627. priv->class_device = device_create(vmlogrdr_class, dev,
  628. MKDEV(vmlogrdr_major,
  629. priv->minor_num),
  630. priv, "%s", dev_name(dev));
  631. if (IS_ERR(priv->class_device)) {
  632. ret = PTR_ERR(priv->class_device);
  633. priv->class_device=NULL;
  634. sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
  635. device_unregister(dev);
  636. return ret;
  637. }
  638. priv->device = dev;
  639. return 0;
  640. }
  641. static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
  642. {
  643. device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
  644. if (priv->device != NULL) {
  645. sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
  646. device_unregister(priv->device);
  647. priv->device=NULL;
  648. }
  649. return 0;
  650. }
/*
 * Allocate and register the character device covering all minors.
 * If cdev_add() fails the cdev is only partially set up, so it is
 * released with kobject_put() instead of cdev_del().
 */
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (!rc)
		return 0;

	// cleanup: cdev is not fully registered, no cdev_del here!
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}
/*
 * Undo everything vmlogrdr_init() set up. Safe to call after a
 * partially completed init: each step checks for, or tolerates, its
 * resource being absent.
 */
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		/* free_page() of a never-allocated (NULL) buffer is a no-op */
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
/*
 * Module init: requires a z/VM guest. Allocates the char major,
 * registers the driver, allocates one page per system service and
 * registers the devices, then the cdev. On any failure the shared
 * cleanup routine unwinds whatever was set up.
 */
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		printk (KERN_ERR "vmlogrdr: not running under VM, "
			"driver not loaded.\n");
		return -ENODEV;
	}

	/* determine RECORDING command syntax once, up front */
	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}
  725. static void __exit vmlogrdr_exit(void)
  726. {
  727. vmlogrdr_cleanup();
  728. return;
  729. }
  730. module_init(vmlogrdr_init);
  731. module_exit(vmlogrdr_exit);