vmlogrdr.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897
  1. /*
  2. * drivers/s390/char/vmlogrdr.c
  3. * character device driver for reading z/VM system service records
  4. *
  5. *
  6. * Copyright 2004 IBM Corporation
  7. * character device driver for reading z/VM system service records,
  8. * Version 1.0
  9. * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
  10. * Stefan Weinhuber <wein@de.ibm.com>
  11. *
  12. */
  13. #include <linux/module.h>
  14. #include <linux/init.h>
  15. #include <linux/errno.h>
  16. #include <linux/types.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/spinlock.h>
  19. #include <asm/atomic.h>
  20. #include <asm/uaccess.h>
  21. #include <asm/cpcmd.h>
  22. #include <asm/debug.h>
  23. #include <asm/ebcdic.h>
  24. #include <net/iucv/iucv.h>
  25. #include <linux/kmod.h>
  26. #include <linux/cdev.h>
  27. #include <linux/device.h>
  28. #include <linux/string.h>
MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 " Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");

/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters (the FENCE marker) into that
 * buffer, so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];		/* z/VM service name, e.g. "*LOGREC " */
	char internal_name[8];		/* name used for the device node */
	char recording_name[8];		/* name used in CP RECORDING commands */
	struct iucv_path *path;		/* IUCV path to the system service */
	int connection_established;	/* set by the path_complete callback */
	int iucv_path_severed;		/* set by the path_severed callback */
	struct iucv_message local_interrupt_buffer; /* pending message descriptor */
	atomic_t receive_ready;		/* count of messages ready to receive */
	int minor_num;			/* minor number of the char device */
	char * buffer;			/* one page holding the current record */
	char * current_position;	/* read cursor within buffer */
	int remaining;			/* bytes left to copy to user space */
	ulong residual_length;		/* record bytes not yet received from iucv */
	int buffer_free;		/* 1: buffer may be refilled */
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;		/* protects the fields noted above */
	struct device *device;
	struct class_device *class_device;
	int autorecording;		/* start/stop recording on open/release */
	int autopurge;			/* purge queued records on open/release */
};
/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
};
/* IUCV callback prototypes; message_pending runs as a bottom half. */
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete   = vmlogrdr_iucv_path_complete,
	.path_severed    = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};

/* openers sleep here until the connection is complete or severed */
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
/* readers sleep here until a record is pending */
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

/* marker appended after each completely received record */
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
/* non-zero if this guest may use the QID option (privilege class A/B) */
static int recording_class_AB;
  139. static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
  140. {
  141. struct vmlogrdr_priv_t * logptr = path->private;
  142. spin_lock(&logptr->priv_lock);
  143. logptr->connection_established = 1;
  144. spin_unlock(&logptr->priv_lock);
  145. wake_up(&conn_wait_queue);
  146. }
/*
 * IUCV path-severed callback: z/VM dropped the connection.  Sever our
 * end too, free the path, record the new state and wake up anybody
 * waiting for the connection or for a record.
 */
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;
	u8 reason = (u8) ipuser[8];	/* sever reason code from z/VM */

	printk (KERN_ERR "vmlogrdr: connection severed with"
		" reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	/* NOTE(review): path is freed and cleared outside priv_lock;
	 * presumably no other context uses it at this point -- verify. */
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}
  164. static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
  165. struct iucv_message *msg)
  166. {
  167. struct vmlogrdr_priv_t * logptr = path->private;
  168. /*
  169. * This function is the bottom half so it should be quick.
  170. * Copy the external interrupt data into our local eib and increment
  171. * the usage count
  172. */
  173. spin_lock(&logptr->priv_lock);
  174. memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
  175. atomic_inc(&logptr->receive_ready);
  176. spin_unlock(&logptr->priv_lock);
  177. wake_up_interruptible(&read_wait_queue);
  178. }
  179. static int vmlogrdr_get_recording_class_AB(void)
  180. {
  181. char cp_command[]="QUERY COMMAND RECORDING ";
  182. char cp_response[80];
  183. char *tail;
  184. int len,i;
  185. printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
  186. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  187. printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
  188. len = strnlen(cp_response,sizeof(cp_response));
  189. // now the parsing
  190. tail=strnchr(cp_response,len,'=');
  191. if (!tail)
  192. return 0;
  193. tail++;
  194. if (!strncmp("ANY",tail,3))
  195. return 1;
  196. if (!strncmp("NONE",tail,4))
  197. return 0;
  198. /*
  199. * expect comma separated list of classes here, if one of them
  200. * is A or B return 1 otherwise 0
  201. */
  202. for (i=tail-cp_response; i<len; i++)
  203. if ( cp_response[i]=='A' || cp_response[i]=='B' )
  204. return 1;
  205. return 0;
  206. }
  207. static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
  208. int action, int purge)
  209. {
  210. char cp_command[80];
  211. char cp_response[160];
  212. char *onoff, *qid_string;
  213. memset(cp_command, 0x00, sizeof(cp_command));
  214. memset(cp_response, 0x00, sizeof(cp_response));
  215. onoff = ((action == 1) ? "ON" : "OFF");
  216. qid_string = ((recording_class_AB == 1) ? " QID * " : "");
  217. /*
  218. * The recording commands needs to be called with option QID
  219. * for guests that have previlege classes A or B.
  220. * Purging has to be done as separate step, because recording
  221. * can't be switched on as long as records are on the queue.
  222. * Doing both at the same time doesn't work.
  223. */
  224. if (purge) {
  225. snprintf(cp_command, sizeof(cp_command),
  226. "RECORDING %s PURGE %s",
  227. logptr->recording_name,
  228. qid_string);
  229. printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
  230. cp_command);
  231. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  232. printk (KERN_DEBUG "vmlogrdr: recording response: %s",
  233. cp_response);
  234. }
  235. memset(cp_command, 0x00, sizeof(cp_command));
  236. memset(cp_response, 0x00, sizeof(cp_response));
  237. snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
  238. logptr->recording_name,
  239. onoff,
  240. qid_string);
  241. printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
  242. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  243. printk (KERN_DEBUG "vmlogrdr: recording response: %s",
  244. cp_response);
  245. /* The recording command will usually answer with 'Command complete'
  246. * on success, but when the specific service was never connected
  247. * before then there might be an additional informational message
  248. * 'HCPCRC8072I Recording entry not found' before the
  249. * 'Command complete'. So I use strstr rather then the strncmp.
  250. */
  251. if (strstr(cp_response,"Command complete"))
  252. return 0;
  253. else
  254. return -EIO;
  255. }
  256. static int vmlogrdr_open (struct inode *inode, struct file *filp)
  257. {
  258. int dev_num = 0;
  259. struct vmlogrdr_priv_t * logptr = NULL;
  260. int connect_rc = 0;
  261. int ret;
  262. dev_num = iminor(inode);
  263. if (dev_num > MAXMINOR)
  264. return -ENODEV;
  265. logptr = &sys_ser[dev_num];
  266. /*
  267. * only allow for blocking reads to be open
  268. */
  269. if (filp->f_flags & O_NONBLOCK)
  270. return -ENOSYS;
  271. /* Besure this device hasn't already been opened */
  272. spin_lock_bh(&logptr->priv_lock);
  273. if (logptr->dev_in_use) {
  274. spin_unlock_bh(&logptr->priv_lock);
  275. return -EBUSY;
  276. }
  277. logptr->dev_in_use = 1;
  278. logptr->connection_established = 0;
  279. logptr->iucv_path_severed = 0;
  280. atomic_set(&logptr->receive_ready, 0);
  281. logptr->buffer_free = 1;
  282. spin_unlock_bh(&logptr->priv_lock);
  283. /* set the file options */
  284. filp->private_data = logptr;
  285. filp->f_op = &vmlogrdr_fops;
  286. /* start recording for this service*/
  287. if (logptr->autorecording) {
  288. ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
  289. if (ret)
  290. printk (KERN_WARNING "vmlogrdr: failed to start "
  291. "recording automatically\n");
  292. }
  293. /* create connection to the system service */
  294. logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
  295. if (!logptr->path)
  296. goto out_dev;
  297. connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
  298. logptr->system_service, NULL, NULL,
  299. logptr);
  300. if (connect_rc) {
  301. printk (KERN_ERR "vmlogrdr: iucv connection to %s "
  302. "failed with rc %i \n", logptr->system_service,
  303. connect_rc);
  304. goto out_path;
  305. }
  306. /* We've issued the connect and now we must wait for a
  307. * ConnectionComplete or ConnectinSevered Interrupt
  308. * before we can continue to process.
  309. */
  310. wait_event(conn_wait_queue, (logptr->connection_established)
  311. || (logptr->iucv_path_severed));
  312. if (logptr->iucv_path_severed)
  313. goto out_record;
  314. return nonseekable_open(inode, filp);
  315. out_record:
  316. if (logptr->autorecording)
  317. vmlogrdr_recording(logptr,0,logptr->autopurge);
  318. out_path:
  319. kfree(logptr->path); /* kfree(NULL) is ok. */
  320. logptr->path = NULL;
  321. out_dev:
  322. logptr->dev_in_use = 0;
  323. return -EIO;
  324. }
  325. static int vmlogrdr_release (struct inode *inode, struct file *filp)
  326. {
  327. int ret;
  328. struct vmlogrdr_priv_t * logptr = filp->private_data;
  329. iucv_path_sever(logptr->path, NULL);
  330. kfree(logptr->path);
  331. logptr->path = NULL;
  332. if (logptr->autorecording) {
  333. ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
  334. if (ret)
  335. printk (KERN_WARNING "vmlogrdr: failed to stop "
  336. "recording automatically\n");
  337. }
  338. logptr->dev_in_use = 0;
  339. return 0;
  340. }
/*
 * Receive the next pending IUCV message -- or the remainder of a partly
 * received record -- into priv->buffer.
 *
 * Buffer layout for a new record:
 *   [int total_length][record data ...][FENCE]
 * where total_length = record length + sizeof(FENCE).
 *
 * Returns 0 when data was placed into the buffer, non-zero when nothing
 * was received (no message pending, or an iucv error).
 */
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}
	return rc;
}
/*
 * Read from a vmlogrdr device.  Blocks until data is in the buffer,
 * then copies at most @count bytes -- never beyond the end of the
 * current record -- to user space.
 * Returns the number of bytes copied or a negative errno.
 */
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	/* refill the buffer, sleeping until a message is pending */
	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
  433. static ssize_t vmlogrdr_autopurge_store(struct device * dev,
  434. struct device_attribute *attr,
  435. const char * buf, size_t count)
  436. {
  437. struct vmlogrdr_priv_t *priv = dev->driver_data;
  438. ssize_t ret = count;
  439. switch (buf[0]) {
  440. case '0':
  441. priv->autopurge=0;
  442. break;
  443. case '1':
  444. priv->autopurge=1;
  445. break;
  446. default:
  447. ret = -EINVAL;
  448. }
  449. return ret;
  450. }
  451. static ssize_t vmlogrdr_autopurge_show(struct device *dev,
  452. struct device_attribute *attr,
  453. char *buf)
  454. {
  455. struct vmlogrdr_priv_t *priv = dev->driver_data;
  456. return sprintf(buf, "%u\n", priv->autopurge);
  457. }
  458. static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
  459. vmlogrdr_autopurge_store);
  460. static ssize_t vmlogrdr_purge_store(struct device * dev,
  461. struct device_attribute *attr,
  462. const char * buf, size_t count)
  463. {
  464. char cp_command[80];
  465. char cp_response[80];
  466. struct vmlogrdr_priv_t *priv = dev->driver_data;
  467. if (buf[0] != '1')
  468. return -EINVAL;
  469. memset(cp_command, 0x00, sizeof(cp_command));
  470. memset(cp_response, 0x00, sizeof(cp_response));
  471. /*
  472. * The recording command needs to be called with option QID
  473. * for guests that have previlege classes A or B.
  474. * Other guests will not recognize the command and we have to
  475. * issue the same command without the QID parameter.
  476. */
  477. if (recording_class_AB)
  478. snprintf(cp_command, sizeof(cp_command),
  479. "RECORDING %s PURGE QID * ",
  480. priv->recording_name);
  481. else
  482. snprintf(cp_command, sizeof(cp_command),
  483. "RECORDING %s PURGE ",
  484. priv->recording_name);
  485. printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
  486. cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
  487. printk (KERN_DEBUG "vmlogrdr: recording response: %s",
  488. cp_response);
  489. return count;
  490. }
  491. static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
  492. static ssize_t vmlogrdr_autorecording_store(struct device *dev,
  493. struct device_attribute *attr,
  494. const char *buf, size_t count)
  495. {
  496. struct vmlogrdr_priv_t *priv = dev->driver_data;
  497. ssize_t ret = count;
  498. switch (buf[0]) {
  499. case '0':
  500. priv->autorecording=0;
  501. break;
  502. case '1':
  503. priv->autorecording=1;
  504. break;
  505. default:
  506. ret = -EINVAL;
  507. }
  508. return ret;
  509. }
  510. static ssize_t vmlogrdr_autorecording_show(struct device *dev,
  511. struct device_attribute *attr,
  512. char *buf)
  513. {
  514. struct vmlogrdr_priv_t *priv = dev->driver_data;
  515. return sprintf(buf, "%u\n", priv->autorecording);
  516. }
  517. static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
  518. vmlogrdr_autorecording_store);
  519. static ssize_t vmlogrdr_recording_store(struct device * dev,
  520. struct device_attribute *attr,
  521. const char * buf, size_t count)
  522. {
  523. struct vmlogrdr_priv_t *priv = dev->driver_data;
  524. ssize_t ret;
  525. switch (buf[0]) {
  526. case '0':
  527. ret = vmlogrdr_recording(priv,0,0);
  528. break;
  529. case '1':
  530. ret = vmlogrdr_recording(priv,1,0);
  531. break;
  532. default:
  533. ret = -EINVAL;
  534. }
  535. if (ret)
  536. return ret;
  537. else
  538. return count;
  539. }
  540. static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
  541. static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
  542. char *buf)
  543. {
  544. char cp_command[] = "QUERY RECORDING ";
  545. int len;
  546. cpcmd(cp_command, buf, 4096, NULL);
  547. len = strlen(buf);
  548. return len;
  549. }
  550. static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
  551. NULL);
/* sysfs attributes attached to every vmlogrdr device */
static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};

static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};

/* device class backing /sys/class/vmlogrdr */
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
};
  567. static int vmlogrdr_register_driver(void)
  568. {
  569. int ret;
  570. /* Register with iucv driver */
  571. ret = iucv_register(&vmlogrdr_iucv_handler, 1);
  572. if (ret) {
  573. printk (KERN_ERR "vmlogrdr: failed to register with"
  574. "iucv driver\n");
  575. goto out;
  576. }
  577. ret = driver_register(&vmlogrdr_driver);
  578. if (ret) {
  579. printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
  580. goto out_iucv;
  581. }
  582. ret = driver_create_file(&vmlogrdr_driver,
  583. &driver_attr_recording_status);
  584. if (ret) {
  585. printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
  586. goto out_driver;
  587. }
  588. vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
  589. if (IS_ERR(vmlogrdr_class)) {
  590. printk(KERN_ERR "vmlogrdr: failed to create class.\n");
  591. ret = PTR_ERR(vmlogrdr_class);
  592. vmlogrdr_class = NULL;
  593. goto out_attr;
  594. }
  595. return 0;
  596. out_attr:
  597. driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
  598. out_driver:
  599. driver_unregister(&vmlogrdr_driver);
  600. out_iucv:
  601. iucv_unregister(&vmlogrdr_iucv_handler, 1);
  602. out:
  603. return ret;
  604. }
/*
 * Reverse of vmlogrdr_register_driver(): destroy the class, remove the
 * driver attribute, unregister the driver and the IUCV handler.
 */
static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
  613. static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
  614. {
  615. struct device *dev;
  616. int ret;
  617. dev = kzalloc(sizeof(struct device), GFP_KERNEL);
  618. if (dev) {
  619. snprintf(dev->bus_id, BUS_ID_SIZE, "%s",
  620. priv->internal_name);
  621. dev->bus = &iucv_bus;
  622. dev->parent = iucv_root;
  623. dev->driver = &vmlogrdr_driver;
  624. /*
  625. * The release function could be called after the
  626. * module has been unloaded. It's _only_ task is to
  627. * free the struct. Therefore, we specify kfree()
  628. * directly here. (Probably a little bit obfuscating
  629. * but legitime ...).
  630. */
  631. dev->release = (void (*)(struct device *))kfree;
  632. } else
  633. return -ENOMEM;
  634. ret = device_register(dev);
  635. if (ret)
  636. return ret;
  637. ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
  638. if (ret) {
  639. device_unregister(dev);
  640. return ret;
  641. }
  642. priv->class_device = class_device_create(
  643. vmlogrdr_class,
  644. NULL,
  645. MKDEV(vmlogrdr_major, priv->minor_num),
  646. dev,
  647. "%s", dev->bus_id );
  648. if (IS_ERR(priv->class_device)) {
  649. ret = PTR_ERR(priv->class_device);
  650. priv->class_device=NULL;
  651. sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
  652. device_unregister(dev);
  653. return ret;
  654. }
  655. dev->driver_data = priv;
  656. priv->device = dev;
  657. return 0;
  658. }
  659. static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
  660. {
  661. class_device_destroy(vmlogrdr_class,
  662. MKDEV(vmlogrdr_major, priv->minor_num));
  663. if (priv->device != NULL) {
  664. sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
  665. device_unregister(priv->device);
  666. priv->device=NULL;
  667. }
  668. return 0;
  669. }
  670. static int vmlogrdr_register_cdev(dev_t dev)
  671. {
  672. int rc = 0;
  673. vmlogrdr_cdev = cdev_alloc();
  674. if (!vmlogrdr_cdev) {
  675. return -ENOMEM;
  676. }
  677. vmlogrdr_cdev->owner = THIS_MODULE;
  678. vmlogrdr_cdev->ops = &vmlogrdr_fops;
  679. vmlogrdr_cdev->dev = dev;
  680. rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
  681. if (!rc)
  682. return 0;
  683. // cleanup: cdev is not fully registered, no cdev_del here!
  684. kobject_put(&vmlogrdr_cdev->kobj);
  685. vmlogrdr_cdev=NULL;
  686. return rc;
  687. }
/*
 * Undo everything vmlogrdr_init() set up.  Safe to call on a partially
 * initialized driver: each step checks whether its resource exists.
 * Used both from init error paths and from module exit.
 */
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		/* free_page() tolerates a zero (never allocated) address */
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
  705. static int vmlogrdr_init(void)
  706. {
  707. int rc;
  708. int i;
  709. dev_t dev;
  710. if (! MACHINE_IS_VM) {
  711. printk (KERN_ERR "vmlogrdr: not running under VM, "
  712. "driver not loaded.\n");
  713. return -ENODEV;
  714. }
  715. recording_class_AB = vmlogrdr_get_recording_class_AB();
  716. rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
  717. if (rc)
  718. return rc;
  719. vmlogrdr_major = MAJOR(dev);
  720. rc=vmlogrdr_register_driver();
  721. if (rc)
  722. goto cleanup;
  723. for (i=0; i < MAXMINOR; ++i ) {
  724. sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
  725. if (!sys_ser[i].buffer) {
  726. rc = ENOMEM;
  727. break;
  728. }
  729. sys_ser[i].current_position = sys_ser[i].buffer;
  730. rc=vmlogrdr_register_device(&sys_ser[i]);
  731. if (rc)
  732. break;
  733. }
  734. if (rc)
  735. goto cleanup;
  736. rc = vmlogrdr_register_cdev(dev);
  737. if (rc)
  738. goto cleanup;
  739. printk (KERN_INFO "vmlogrdr: driver loaded\n");
  740. return 0;
  741. cleanup:
  742. vmlogrdr_cleanup();
  743. printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
  744. return rc;
  745. }
  746. static void vmlogrdr_exit(void)
  747. {
  748. vmlogrdr_cleanup();
  749. printk (KERN_INFO "vmlogrdr: driver unloaded\n");
  750. return;
  751. }
  752. module_init(vmlogrdr_init);
  753. module_exit(vmlogrdr_exit);