dasd_eer.c

/*
 * character device driver for extended error reporting
 *
 * Copyright (C) 2005 IBM Corporation
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>");
MODULE_DESCRIPTION("DASD extended error reporting module");

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"

/*****************************************************************************/
/* the internal buffer */
/*****************************************************************************/
/*
 * The internal buffer is meant to store opaque blobs of data, so it does not
 * know of higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (once per record, to write the size to the buffer
 *   and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record, to write the data)
 * The data can be written in several steps, but the total size has to be
 * known up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps: first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done by
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to get the bufferlock first and keep
 * it until a complete record is written or read. A minimal usage sketch
 * follows below.
 */
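/*
 * Illustrative only (not part of the driver logic): a minimal sketch of how
 * a record is written to every open buffer, following the protocol described
 * above. The trigger writers below (e.g. dasd_eer_write_standard_trigger)
 * use exactly this pattern; the payload here is just a placeholder.
 *
 *	unsigned long flags;
 *	struct eerbuffer *eerb;
 *	char payload[8] = "example";
 *
 *	spin_lock_irqsave(&bufferlock, flags);
 *	list_for_each_entry(eerb, &bufferlist, list) {
 *		dasd_eer_start_record(eerb, sizeof(payload));
 *		dasd_eer_write_buffer(eerb, sizeof(payload), payload);
 *	}
 *	spin_unlock_irqrestore(&bufferlock, flags);
 */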
/*
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user doesn't want to read a complete record in one go, we have to
 * keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated
 * between two reads, residual is set to -1 so that the next read will fail.
 * All entries in the eerbuffer structure are protected with the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
 */
struct eerbuffer {
	struct list_head list;
	char **buffer;
	int buffersize;
	int buffer_page_count;
	int head;
	int tail;
	int residual;
};

LIST_HEAD(bufferlist);

static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;

DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
/*
 * How many free bytes are available in the buffer.
 * One byte is always left unused so that a full buffer (head one byte behind
 * tail) can be told apart from an empty one (head == tail).
 * Needs to be called with bufferlock held.
 */
static int
dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	else
		return eerb->buffersize - eerb->head + eerb->tail - 1;
}
/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int
dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	else
		return eerb->buffersize - eerb->tail + eerb->head;
}
/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Call dasd_eer_start_record first to ensure that enough
 * free space is available.
 * Needs to be called with bufferlock held.
 */
static void
dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data)
{
	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, (PAGE_SIZE - localhead));
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0; /* wrap around */
		if (eerb->head > eerb->buffersize) {
			MESSAGE(KERN_ERR, "%s", "runaway buffer head.");
			BUG();
		}
	}
}
/*
 * Needs to be called with bufferlock held.
 */
static int
dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data)
{
	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, (PAGE_SIZE - localtail));
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0; /* wrap around */
		if (eerb->tail > eerb->buffersize) {
			MESSAGE(KERN_ERR, "%s", "runaway buffer tail.");
			BUG();
		}
	}
	return finalcount;
}
/*
 * Whenever you want to write a blob of data to the internal buffer you
 * have to start by using this function first. It will write the number
 * of bytes that will be written to the buffer. If necessary it will remove
 * old records to make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int
dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, sizeof(tailcount),
				     (char *) &tailcount);
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, sizeof(count), (char *) &count);
	return 0;
}
/*
 * release pages that are not used anymore
 */
static void
dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; ++i)
		free_page((unsigned long) buf[i]);
}

/*
 * allocate a new set of memory pages
 */
static int
dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; ++i) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}
/*
 * empty the buffer by resetting head and tail
 * In case there is a half read data blob in the buffer, we set residual
 * to -1 to indicate that the remainder of the blob is lost.
 */
static void
dasd_eer_purge_buffer(struct eerbuffer *eerb)
{
	unsigned long flags;

	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->residual > 0)
		eerb->residual = -1;
	eerb->tail = 0;
	eerb->head = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
}
/*
 * Set the size of the buffer; newsize is the new number of pages to be used.
 * We don't try to copy any data back and forth, so any resize will also
 * purge the buffer.
 */
static int
dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize)
{
	int i, oldcount, reuse;
	char **new;
	char **old;
	unsigned long flags;

	if (newsize < 1)
		return -EINVAL;
	if (eerb->buffer_page_count == newsize) {
		/* documented behaviour is that any successful invocation
		 * will purge all records */
		dasd_eer_purge_buffer(eerb);
		return 0;
	}
	new = kmalloc(newsize * sizeof(char *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	reuse = min(eerb->buffer_page_count, newsize);
	for (i = 0; i < reuse; ++i)
		new[i] = eerb->buffer[i];

	if (eerb->buffer_page_count < newsize) {
		if (dasd_eer_allocate_buffer_pages(
			    &new[eerb->buffer_page_count],
			    newsize - eerb->buffer_page_count)) {
			kfree(new);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave(&bufferlock, flags);
	old = eerb->buffer;
	eerb->buffer = new;
	if (eerb->residual > 0)
		eerb->residual = -1;
	eerb->tail = 0;
	eerb->head = 0;
	oldcount = eerb->buffer_page_count;
	eerb->buffer_page_count = newsize;
	spin_unlock_irqrestore(&bufferlock, flags);

	if (oldcount > newsize) {
		for (i = newsize; i < oldcount; ++i)
			free_page((unsigned long) old[i]);
	}
	kfree(old);
	return 0;
}
/*****************************************************************************/
/* The extended error reporting functionality */
/*****************************************************************************/

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write_trigger (via a notifier mechanism) and gives the
 * respective trigger ID as parameter.
 * Currently there are four kinds of triggers:
 *
 * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH:      there is no path to the device left
 * DASD_EER_STATECHANGE: the state of the device has changed
 *
 * For the first three triggers all required information can be supplied by
 * the caller. For these triggers a record is written by the function
 * dasd_eer_write_standard_trigger.
 *
 * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE
 * trigger, we have to gather the necessary sense data first. We cannot queue
 * the necessary SNSS (sense subsystem status) request immediately, since we
 * would be likely to run into a deadlock. Instead, we schedule a
 * work_struct that calls the function dasd_eer_sense_subsystem_status to
 * create and start an SNSS request asynchronously.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when the extended error reporting is enabled for a device (by
 * dasd_eer_probe). There is one private eer data structure for each eer
 * enabled DASD device. It contains memory for the work_struct, one SNSS cqr
 * and a flags field that is used to coordinate the use of the cqr. The call
 * to write a state change trigger can come in at any time, so we have one
 * flag CQR_IN_USE that protects the cqr itself. When this flag indicates
 * that the cqr is currently in use, dasd_eer_sense_subsystem_status cannot
 * start a second request but sets the SNSS_REQUESTED flag instead.
 *
 * When the request is finished, the callback function dasd_eer_SNSS_cb
 * is called. This function will invoke the function
 * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also
 * check the SNSS_REQUESTED flag and if it is set it will call
 * dasd_eer_sense_subsystem_status again.
 *
 * To avoid races when checking and changing these flags, they must be
 * protected by the snsslock.
 */
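/*
 * Illustrative only: a sketch of what the reporting side (the dasd core,
 * not this module) roughly does to raise a trigger. The struct layout and
 * the helper that walks the notifier chain live in dasd_int.h / the dasd
 * core; the field names below are taken from the usage in
 * dasd_eer_write_trigger and dasd_eer_notify further down.
 *
 *	struct dasd_eer_trigger trigger;
 *
 *	trigger.id = DASD_EER_FATALERROR;
 *	trigger.device = device;
 *	trigger.cqr = cqr;
 *	(the notifier chain is then invoked with action DASD_EER_TRIGGER and
 *	 &trigger as data, which ends up in dasd_eer_notify below)
 */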
struct dasd_eer_private {
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	struct work_struct worker;
};

static void dasd_eer_destroy(struct dasd_device *device,
			     struct dasd_eer_private *eer);
static int dasd_eer_write_trigger(struct dasd_eer_trigger *trigger);
static void dasd_eer_sense_subsystem_status(void *data);
static int dasd_eer_notify(struct notifier_block *self,
			   unsigned long action, void *data);

struct workqueue_struct *dasd_eer_workqueue;

#define SNSS_DATA_SIZE 44
static spinlock_t snsslock = SPIN_LOCK_UNLOCKED;

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));

static struct notifier_block dasd_eer_nb = {
	.notifier_call = dasd_eer_notify,
};

/*
 * flags for use with dasd_eer_private
 */
#define CQR_IN_USE     0
#define SNSS_REQUESTED 1
/*
 * This function checks if extended error reporting is available for a given
 * dasd_device. If yes, then it creates and returns a struct dasd_eer_private,
 * otherwise it returns an -EPERM error pointer.
 */
struct dasd_eer_private *
dasd_eer_probe(struct dasd_device *device)
{
	struct dasd_eer_private *private;

	if (!(device && device->discipline
	      && !strcmp(device->discipline->name, "ECKD"))) {
		return ERR_PTR(-EPERM);
	}
	/* allocate the private data structure */
	private = (struct dasd_eer_private *) kmalloc(
			sizeof(struct dasd_eer_private), GFP_KERNEL);
	if (!private)
		return ERR_PTR(-ENOMEM);
	INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status,
		  (void *) device);
	private->cqr = dasd_kmalloc_request("ECKD",
					    1 /* SNSS */,
					    SNSS_DATA_SIZE,
					    device);
	if (!private->cqr) {
		kfree(private);
		return ERR_PTR(-ENOMEM);
	}
	private->flags = 0;
	return private;
}
/*
 * If our private SNSS request is queued, remove it from the
 * dasd ccw queue so we can free the request's memory.
 */
static void
dasd_eer_dequeue_SNSS_request(struct dasd_device *device,
			      struct dasd_eer_private *eer)
{
	struct list_head *lst, *nxt;
	struct dasd_ccw_req *cqr, *erpcqr;
	dasd_erp_fn_t erp_fn;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	list_for_each_safe(lst, nxt, &device->ccw_queue) {
		cqr = list_entry(lst, struct dasd_ccw_req, list);
		/* we are looking for two kinds of requests */
		/* first kind: our SNSS request: */
		if (cqr == eer->cqr) {
			if (cqr->status == DASD_CQR_IN_IO)
				device->discipline->term_IO(cqr);
			list_del(&cqr->list);
			break;
		}
		/* second kind: ERP requests for our SNSS request */
		if (cqr->refers) {
			/* If this erp request chain ends in our cqr, then */
			/* call the erp_postaction to clean it up */
			erpcqr = cqr;
			while (erpcqr->refers)
				erpcqr = erpcqr->refers;
			if (erpcqr == eer->cqr) {
				erp_fn = device->discipline->erp_postaction(
						cqr);
				erp_fn(cqr);
			}
			continue;
		}
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
/*
 * This function dismantles a struct dasd_eer_private that was created by
 * dasd_eer_probe. Since we want to free our private data structure,
 * we must make sure that the memory is not in use anymore.
 * We have to flush the work queue and remove a possible SNSS request
 * from the dasd queue.
 */
static void
dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer)
{
	flush_workqueue(dasd_eer_workqueue);
	dasd_eer_dequeue_SNSS_request(device, eer);
	dasd_kfree_request(eer->cqr, device);
	kfree(eer);
}
/*
 * enable the extended error reporting for a particular device
 */
static int
dasd_eer_enable_on_device(struct dasd_device *device)
{
	void *eer;

	if (!device)
		return -ENODEV;
	if (device->eer)
		return 0;
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;
	eer = (void *) dasd_eer_probe(device);
	if (IS_ERR(eer)) {
		module_put(THIS_MODULE);
		return PTR_ERR(eer);
	}
	device->eer = eer;
	return 0;
}
/*
 * disable the extended error reporting for a particular device
 */
static int
dasd_eer_disable_on_device(struct dasd_device *device)
{
	struct dasd_eer_private *eer;

	if (!device)
		return -ENODEV;
	if (!device->eer)
		return 0;
	eer = device->eer;
	device->eer = NULL;
	dasd_eer_destroy(device, eer);
	module_put(THIS_MODULE);
	return 0;
}
/*
 * Set extended error reporting (eer).
 * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
 */
static int
dasd_ioctl_set_eer(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;
	int intval;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (bdev != bdev->bd_contains)
		/* Error-reporting is not allowed for partitions */
		return -EINVAL;
	if (get_user(intval, (int __user *) args))
		return -EFAULT;
	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;

	intval = (intval != 0);
	DEV_MESSAGE(KERN_DEBUG, device,
		    "set eer on device to %d", intval);
	if (intval)
		return dasd_eer_enable_on_device(device);
	else
		return dasd_eer_disable_on_device(device);
}

/*
 * Get value of extended error reporting.
 * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
 */
static int
dasd_ioctl_get_eer(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;
	return put_user((device->eer != NULL), (int __user *) args);
}
/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
 */
static int
dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timeval tv;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	/* go through cqr chain and count the valid sense data sets */
	temp_cqr = cqr;
	data_size = 0;
	while (temp_cqr) {
		if (temp_cqr->irb.esw.esw0.erw.cons)
			data_size += 32;
		temp_cqr = temp_cqr->refers;
	}

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, sizeof(header), (char *) &header);
		temp_cqr = cqr;
		while (temp_cqr) {
			if (temp_cqr->irb.esw.esw0.erw.cons)
				dasd_eer_write_buffer(eerb, 32, cqr->irb.ecw);
			temp_cqr = temp_cqr->refers;
		}
		dasd_eer_write_buffer(eerb, 4, "EOR");
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
	return 0;
}
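/*
 * For reference (not enforced anywhere as a struct): the payload of a
 * standard trigger record, as a reader of the character device sees it,
 * is laid out by the function above as
 *
 *	struct dasd_eer_header	total_size, trigger id, timestamp, busid
 *	n * 32 bytes		one sense data set per request in the cqr
 *				chain that has valid sense data (n may be 0)
 *	4 bytes			the end-of-record marker "EOR" plus the
 *				terminating zero byte
 *
 * total_size in the header counts all of the above. SNSS trigger records
 * (written by dasd_eer_write_SNSS_trigger below) look the same, except that
 * the middle part is a single block of SNSS_DATA_SIZE bytes, or empty if
 * the SNSS request failed.
 */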
/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void
dasd_eer_write_SNSS_trigger(struct dasd_device *device,
			    struct dasd_ccw_req *cqr)
{
	int data_size;
	int snss_rc;
	struct timeval tv;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, sizeof(header), (char *) &header);
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data);
		dasd_eer_write_buffer(eerb, 4, "EOR");
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
 * callback function for use with SNSS request
 */
static void
dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device;
	struct dasd_eer_private *private;
	unsigned long irqflags;

	device = (struct dasd_device *) data;
	private = (struct dasd_eer_private *) device->eer;
	dasd_eer_write_SNSS_trigger(device, cqr);
	spin_lock_irqsave(&snsslock, irqflags);
	if (!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) {
		clear_bit(CQR_IN_USE, &private->flags);
		spin_unlock_irqrestore(&snsslock, irqflags);
		return;
	}
	clear_bit(CQR_IN_USE, &private->flags);
	spin_unlock_irqrestore(&snsslock, irqflags);
	dasd_eer_sense_subsystem_status(device);
}
/*
 * clean a used cqr before using it again
 */
static void
dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr)
{
	struct ccw1 *cpaddr = cqr->cpaddr;
	void *data = cqr->data;

	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	memset(cpaddr, 0, sizeof(struct ccw1));
	memset(data, 0, SNSS_DATA_SIZE);
	cqr->cpaddr = cpaddr;
	cqr->data = data;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
}
/*
 * Build and start an SNSS request.
 * This function is called from a work queue, so we have to
 * pass the dasd_device pointer as a void pointer.
 */
static void
dasd_eer_sense_subsystem_status(void *data)
{
	struct dasd_device *device;
	struct dasd_eer_private *private;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long irqflags;

	device = (struct dasd_device *) data;
	private = (struct dasd_eer_private *) device->eer;
	if (!private) /* device not eer enabled any more */
		return;
	cqr = private->cqr;
	spin_lock_irqsave(&snsslock, irqflags);
	if (test_and_set_bit(CQR_IN_USE, &private->flags)) {
		set_bit(SNSS_REQUESTED, &private->flags);
		spin_unlock_irqrestore(&snsslock, irqflags);
		return;
	}
	spin_unlock_irqrestore(&snsslock, irqflags);

	dasd_eer_clean_SNSS_request(cqr);
	cqr->device = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_SNSS_cb;
	cqr->callback_data = (void *) device;
	dasd_add_request_head(cqr);
}
/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
static int
dasd_eer_write_trigger(struct dasd_eer_trigger *trigger)
{
	int rc;
	struct dasd_eer_private *private = trigger->device->eer;

	switch (trigger->id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		rc = dasd_eer_write_standard_trigger(
			     trigger->id, trigger->device, trigger->cqr);
		break;
	case DASD_EER_NOPATH:
		rc = dasd_eer_write_standard_trigger(
			     trigger->id, trigger->device, NULL);
		break;
	case DASD_EER_STATECHANGE:
		if (queue_work(dasd_eer_workqueue, &private->worker)) {
			rc = 0;
		} else {
			/* If the work_struct was already queued, it can't
			 * be queued again. But this is OK since we don't
			 * need to have it queued twice.
			 */
			rc = -EBUSY;
		}
		break;
	default: /* unknown trigger, so we write it without any sense data */
		rc = dasd_eer_write_standard_trigger(
			     trigger->id, trigger->device, NULL);
		break;
	}
	return rc;
}
/*
 * This function is registered with the dasd device driver and gets called
 * for all dasd eer notifications.
 */
static int dasd_eer_notify(struct notifier_block *self,
			   unsigned long action, void *data)
{
	switch (action) {
	case DASD_EER_DISABLE:
		dasd_eer_disable_on_device((struct dasd_device *) data);
		break;
	case DASD_EER_TRIGGER:
		dasd_eer_write_trigger((struct dasd_eer_trigger *) data);
		break;
	}
	return NOTIFY_OK;
}
/*****************************************************************************/
/* the device operations */
/*****************************************************************************/

/*
 * On the one hand we need a lock to access our internal buffer, on the
 * other hand a copy_to_user can sleep. So we copy the data we have to
 * transfer into a readbuffer, which is protected by the readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
DECLARE_MUTEX(readbuffer_mutex);
static int
dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->head = 0;
	eerb->tail = 0;
	eerb->residual = 0;
	eerb->buffer_page_count = 1;
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
			       GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;

	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp, filp);
}
static int
dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}
static long
dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int intval;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	switch (cmd) {
	case DASD_EER_PURGE:
		dasd_eer_purge_buffer(eerb);
		return 0;
	case DASD_EER_SETBUFSIZE:
		if (get_user(intval, (int __user *) arg))
			return -EFAULT;
		return dasd_eer_resize_buffer(eerb, intval);
	default:
		return -ENOIOCTLCMD;
	}
}
static ssize_t
dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (down_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) {
		/* the remainder of this record has been deleted */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		up(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb,
				sizeof(tailcount), (char *) &tailcount);
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				up(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (down_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer);
	WARN_ON(tc != effective_count);
	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		up(&readbuffer_mutex);
		return -EFAULT;
	}
	up(&readbuffer_mutex);
	return effective_count;
}
static unsigned int
dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	unsigned int mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = POLLIN | POLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}
static struct file_operations dasd_eer_fops = {
	.open = &dasd_eer_open,
	.release = &dasd_eer_close,
	.unlocked_ioctl = &dasd_eer_ioctl,
	.compat_ioctl = &dasd_eer_ioctl,
	.read = &dasd_eer_read,
	.poll = &dasd_eer_poll,
	.owner = THIS_MODULE,
};

static struct miscdevice dasd_eer_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "dasd_eer",
	.fops = &dasd_eer_fops,
};
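/*
 * Illustrative only: a minimal user space sketch of how records could be
 * consumed from the misc device registered above (typically visible as
 * /dev/dasd_eer once the device node has been created; the exact path is an
 * assumption about the target system, not something this module enforces).
 * Each read() returns at most one record, starting with the
 * struct dasd_eer_header defined above; partial reads continue the same
 * record on the next call.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char record[4096];
 *		ssize_t len;
 *		int fd;
 *
 *		fd = open("/dev/dasd_eer", O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *		while ((len = read(fd, record, sizeof(record))) > 0)
 *			printf("got a %zd byte record\n", len);
 *		close(fd);
 *		return 0;
 *	}
 */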
/*****************************************************************************/
/* Init and exit */
/*****************************************************************************/

static int
__init dasd_eer_init(void)
{
	int rc;

	dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer");
	if (!dasd_eer_workqueue) {
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
			"create workqueue");
		rc = -ENOMEM;
		goto out;
	}

	rc = dasd_register_eer_notifier(&dasd_eer_nb);
	if (rc) {
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
			"register error reporting");
		goto queue;
	}

	dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer);

	/* we don't need our own character device,
	 * so we just register as misc device */
	rc = misc_register(&dasd_eer_dev);
	if (rc) {
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
			"register misc device");
		goto unregister;
	}

	return 0;

unregister:
	dasd_unregister_eer_notifier(&dasd_eer_nb);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
				 dasd_ioctl_set_eer);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
				 dasd_ioctl_get_eer);
queue:
	destroy_workqueue(dasd_eer_workqueue);
out:
	return rc;
}
module_init(dasd_eer_init);
static void
__exit dasd_eer_exit(void)
{
	dasd_unregister_eer_notifier(&dasd_eer_nb);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
				 dasd_ioctl_set_eer);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
				 dasd_ioctl_get_eer);
	destroy_workqueue(dasd_eer_workqueue);
	WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
}
module_exit(dasd_eer_exit);