/*
 * Character device driver for extended error reporting.
 *
 * Copyright (C) 2005 IBM Corporation
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/err.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"

/*
 * SECTION: the internal buffer
 */

/*
 * The internal buffer is meant to store opaque blobs of data, so it does
 * not know of higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (one time per record to write the size to the
 *                          buffer and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record to write the data)
 * The data can be written in several steps but you will have to compute
 * the total size up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps, first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done by
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to get the bufferlock first and keep
 * it until a complete record is written or read.
 *
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user does not want to read a complete record in one go, we have to
 * keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated
 * between two reads then residual will be set to -1 so that the next read
 * will fail.
 * All entries in the eerbuffer structure are protected with the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
 */

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
	struct list_head list;
	char **buffer;
	int buffersize;
	int buffer_page_count;
	int head;
	int tail;
	int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);

/*
 * How many free bytes are available on the buffer.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	return eerb->buffersize - eerb->tail + eerb->head;
}

/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Make sure to call dasd_eer_start_record first, to
 * make sure that enough free space is available.
 * Needs to be called with bufferlock held.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
				  char *data, int count)
{
	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localhead);
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0; /* wrap around */
		BUG_ON(eerb->head > eerb->buffersize);
	}
}

/*
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localtail);
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0; /* wrap around */
		BUG_ON(eerb->tail > eerb->buffersize);
	}
	return finalcount;
}
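
/*
 * A minimal usage sketch (not part of the driver) of how one complete
 * record is read back from an eerbuffer. It assumes the caller already
 * holds bufferlock and that "rec" is a hypothetical buffer large enough
 * for the record:
 *
 *	int size;
 *
 *	if (dasd_eer_read_buffer(eerb, (char *) &size, sizeof(size)) ==
 *	    sizeof(size))
 *		dasd_eer_read_buffer(eerb, rec, size);
 *
 * dasd_eer_read below uses the same two-step pattern, but additionally
 * supports partial reads by keeping the remainder in eerb->residual.
 */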

/*
 * Whenever you want to write a blob of data to the internal buffer you
 * have to start by using this function first. It will write the number
 * of bytes that will be written to the buffer. If necessary it will remove
 * old records to make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, (char *) &tailcount,
				     sizeof(tailcount));
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

	return 0;
}
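
/*
 * A minimal usage sketch (not part of the driver) of how one record is
 * written. It assumes the caller holds bufferlock and that "data" and
 * "len" are hypothetical:
 *
 *	if (!dasd_eer_start_record(eerb, len))
 *		dasd_eer_write_buffer(eerb, data, len);
 *
 * The trigger functions below follow exactly this pattern, splitting the
 * data part into several dasd_eer_write_buffer calls (header, sense or
 * SNSS data, "EOR" marker).
 */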

/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++)
		free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of memory pages.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * SECTION: The extended error reporting functionality
 */

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are four kinds of triggers:
 *
 * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH:      There is no path to the device left.
 * DASD_EER_STATECHANGE: The state of the device has changed.
 *
 * For the first three triggers all required information can be supplied by
 * the caller. For these triggers a record is written by the function
 * dasd_eer_write_standard_trigger.
 *
 * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status CCW needs to be executed to gather the necessary sense data first.
 * The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
 * trigger.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when the extended error reporting is enabled for a device (by
 * dasd_eer_enable). There is one sense subsystem status request for each
 * eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
 * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
 * that the cqr is currently in use, dasd_eer_snss cannot start a second
 * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
 * the SNSS request will check the bit and call dasd_eer_snss again.
 */

#define SNSS_DATA_SIZE 44
#define DASD_EER_BUSID_SIZE 10

struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
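
/*
 * Based on the trigger functions below, a complete record as delivered
 * to user space through the character device is laid out like this
 * (sizes in bytes, header is packed):
 *
 *	| struct dasd_eer_header (34) | trigger data (0..n) | "EOR\0" (4) |
 *
 * header.total_size covers the whole record including the trailing "EOR"
 * marker. Inside the ring buffer each record is additionally preceded by
 * a four byte length written by dasd_eer_start_record; that length is
 * consumed by dasd_eer_read and never reaches user space.
 */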

/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the triggers data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timeval tv;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (temp_cqr->irb.esw.esw0.erw.cons)
			data_size += 32;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	strncpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
			if (temp_cqr->irb.esw.esw0.erw.cons)
				/* write the sense data of this request,
				 * not that of the head of the chain */
				dasd_eer_write_buffer(eerb,
						      temp_cqr->irb.ecw, 32);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timeval tv;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	strncpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
		    unsigned int id)
{
	if (!device->eer_cqr)
		return;
	switch (id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		dasd_eer_write_standard_trigger(device, cqr, id);
		break;
	case DASD_EER_NOPATH:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	case DASD_EER_STATECHANGE:
		dasd_eer_write_snss_trigger(device, cqr, id);
		break;
	default: /* unknown trigger, so we write it without any sense data */
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	}
}
EXPORT_SYMBOL(dasd_eer_write);
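
/*
 * A minimal sketch (not part of this file) of how the DASD core is
 * expected to report a fatal, unrecoverable request, assuming "device"
 * and "cqr" describe the failed request:
 *
 *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
 *
 * For DASD_EER_NOPATH no request is involved and NULL is passed as cqr.
 */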

/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request in use. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* cdev is already locked, can't use dasd_add_request_head */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}

/*
 * Callback function for use with sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error recovery has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_kfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;

	if (device->eer_cqr)
		return 0;

	if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
		return -EPERM;	/* FIXME: -EMEDIUMTYPE ? */

	cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
				   SNSS_DATA_SIZE, device);
	if (IS_ERR(cqr))
		return -ENOMEM;

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);

	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
	cqr->cpaddr->count = SNSS_DATA_SIZE;
	cqr->cpaddr->flags = 0;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		dasd_kfree_request(cqr, device);
	return 0;
}

/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr && !in_use)
		dasd_kfree_request(cqr, device);
}
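
/*
 * dasd_eer_enable and dasd_eer_disable are not called from this file;
 * they are typically wired to a per device sysfs attribute (eer_enabled
 * in the DASD device attributes), e.g. writing 1 to
 * /sys/bus/ccw/devices/<busid>/eer_enabled. Enabling allocates the SNSS
 * request up front; disabling frees it unless it is currently in flight
 * (DASD_FLAG_EER_IN_USE), in which case the request is released later by
 * dasd_eer_snss_cb.
 */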

/*
 * SECTION: the device operations
 */

/*
 * On the one side we need a lock to access our internal buffer, on the
 * other side a copy_to_user can sleep. So we need to copy the data we have
 * to transfer into a readbuffer, which is protected by the readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	lock_kernel();
	eerb->buffer_page_count = eer_pages;
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		MESSAGE(KERN_WARNING, "can't open device since module "
			"parameter eer_pages is smaller than 1 or"
			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		unlock_kernel();
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
			       GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		unlock_kernel();
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		unlock_kernel();
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);
	unlock_kernel();

	return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) { /* the remainder of this record */
				  /* has been deleted             */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}

static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	unsigned int mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = POLLIN | POLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}

static const struct file_operations dasd_eer_fops = {
	.open		= &dasd_eer_open,
	.release	= &dasd_eer_close,
	.read		= &dasd_eer_read,
	.poll		= &dasd_eer_poll,
	.owner		= THIS_MODULE,
};
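
/*
 * A minimal user space sketch (not part of the driver) of how records
 * are consumed. It assumes the misc device shows up as /dev/dasd_eer
 * (derived from the name registered below) and ignores short reads and
 * error checking for brevity:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	struct eer_header {
 *		uint32_t total_size;
 *		uint32_t trigger;
 *		uint64_t tv_sec;
 *		uint64_t tv_usec;
 *		char busid[10];
 *	} __attribute__ ((packed));
 *
 *	int fd = open("/dev/dasd_eer", O_RDONLY);
 *	struct eer_header header;
 *	char data[4096];
 *
 *	read(fd, &header, sizeof(header));  (blocks until a record arrives)
 *	read(fd, data, header.total_size - sizeof(header));
 *	close(fd);
 *
 * Real consumers should loop over read(), check return values and can
 * use poll() to wait for POLLIN on several open descriptors at once.
 */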

static struct miscdevice *dasd_eer_dev = NULL;

int __init dasd_eer_init(void)
{
	int rc;

	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
	if (!dasd_eer_dev)
		return -ENOMEM;

	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
	dasd_eer_dev->name  = "dasd_eer";
	dasd_eer_dev->fops  = &dasd_eer_fops;

	rc = misc_register(dasd_eer_dev);
	if (rc) {
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
			"register misc device");
		return rc;
	}

	return 0;
}

void dasd_eer_exit(void)
{
	if (dasd_eer_dev) {
		WARN_ON(misc_deregister(dasd_eer_dev) != 0);
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
	}
}