tape_core.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242
  1. /*
  2. * drivers/s390/char/tape_core.c
  3. * basic function of the tape device driver
  4. *
  5. * S390 and zSeries version
  6. * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
  7. * Author(s): Carsten Otte <cotte@de.ibm.com>
  8. * Michael Holzheu <holzheu@de.ibm.com>
  9. * Tuan Ngo-Anh <ngoanh@de.ibm.com>
  10. * Martin Schwidefsky <schwidefsky@de.ibm.com>
  11. */
  12. #include <linux/config.h>
  13. #include <linux/module.h>
  14. #include <linux/init.h> // for kernel parameters
  15. #include <linux/kmod.h> // for requesting modules
  16. #include <linux/spinlock.h> // for locks
  17. #include <linux/vmalloc.h>
  18. #include <linux/list.h>
  19. #include <asm/types.h> // for variable types
  20. #define TAPE_DBF_AREA tape_core_dbf
  21. #include "tape.h"
  22. #include "tape_std.h"
  23. #define PRINTK_HEADER "TAPE_CORE: "
/* Forward declarations: the interrupt handler and the request-removal
 * helper reference each other, so one must be declared up front. */
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void __tape_remove_request(struct tape_device *, struct tape_request *);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock tape_device_lock.
 */
static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to the debug area; exported so discipline modules can log
 * into the same area via the TAPE_DBF_AREA macro.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations (indexed by enum value).
 */
const char *tape_state_verbose[TS_SIZE] =
{
        [TS_UNUSED]   = "UNUSED",
        [TS_IN_USE]   = "IN_USE",
        [TS_BLKUSE]   = "BLKUSE",
        [TS_INIT]     = "INIT ",
        [TS_NOT_OPER] = "NOT_OP"
};

/* Three-letter tags for each tape operation, used in debug output. */
const char *tape_op_verbose[TO_SIZE] =
{
        [TO_BLOCK] = "BLK",     [TO_BSB] = "BSB",
        [TO_BSF] = "BSF",       [TO_DSE] = "DSE",
        [TO_FSB] = "FSB",       [TO_FSF] = "FSF",
        [TO_LBL] = "LBL",       [TO_NOP] = "NOP",
        [TO_RBA] = "RBA",       [TO_RBI] = "RBI",
        [TO_RFO] = "RFO",       [TO_REW] = "REW",
        [TO_RUN] = "RUN",       [TO_WRI] = "WRI",
        [TO_WTM] = "WTM",       [TO_MSEN] = "MSN",
        [TO_LOAD] = "LOA",      [TO_READ_CONFIG] = "RCF",
        [TO_READ_ATTMSG] = "RAT",
        [TO_DIS] = "DIS",       [TO_ASSIGN] = "ASS",
        [TO_UNASSIGN] = "UAS"
};
  64. static inline int
  65. busid_to_int(char *bus_id)
  66. {
  67. int dec;
  68. int d;
  69. char * s;
  70. for(s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
  71. d = (d * 10) + (*s - '0');
  72. dec = d;
  73. for(s++, d = 0; *s != '\0' && *s != '.'; s++)
  74. d = (d * 10) + (*s - '0');
  75. dec = (dec << 8) + d;
  76. for(s++; *s != '\0'; s++) {
  77. if (*s >= '0' && *s <= '9') {
  78. d = *s - '0';
  79. } else if (*s >= 'a' && *s <= 'f') {
  80. d = *s - 'a' + 10;
  81. } else {
  82. d = *s - 'A' + 10;
  83. }
  84. dec = (dec << 4) + d;
  85. }
  86. return dec;
  87. }
/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 * replaced by a link to the cdev tree.
 */

/* sysfs show: current medium state as the raw enum integer value. */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;

        tdev = (struct tape_device *) dev->driver_data;
        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
  103. static ssize_t
  104. tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
  105. {
  106. struct tape_device *tdev;
  107. tdev = (struct tape_device *) dev->driver_data;
  108. return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
  109. }
  110. static
  111. DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
/* sysfs show: tape state as text, or "OFFLINE" when no minor is
 * assigned (first_minor < 0 means the device is not online). */
static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;

        tdev = (struct tape_device *) dev->driver_data;
        return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
                "OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);
/* sysfs show: tag of the request currently at the head of the request
 * queue, "---" for an idle queue, "N/A" for an offline device.  The
 * ccw device lock guards the queue against concurrent modification. */
static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;
        ssize_t rc;

        tdev = (struct tape_device *) dev->driver_data;
        if (tdev->first_minor < 0)
                return scnprintf(buf, PAGE_SIZE, "N/A\n");

        spin_lock_irq(get_ccwdev_lock(tdev->cdev));
        if (list_empty(&tdev->req_queue))
                rc = scnprintf(buf, PAGE_SIZE, "---\n");
        else {
                struct tape_request *req;

                req = list_entry(tdev->req_queue.next, struct tape_request,
                        list);
                rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
        }
        spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
        return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
  144. static ssize_t
  145. tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
  146. {
  147. struct tape_device *tdev;
  148. tdev = (struct tape_device *) dev->driver_data;
  149. return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
  150. }
  151. static
  152. DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
/* All tape sysfs attributes, grouped so tape_generic_probe/remove can
 * create and remove them with a single sysfs call. */
static struct attribute *tape_attrs[] = {
        &dev_attr_medium_state.attr,
        &dev_attr_first_minor.attr,
        &dev_attr_state.attr,
        &dev_attr_operation.attr,
        &dev_attr_blocksize.attr,
        NULL
};

static struct attribute_group tape_attr_group = {
        .attrs = tape_attrs,
};
  164. /*
  165. * Tape state functions
  166. */
  167. void
  168. tape_state_set(struct tape_device *device, enum tape_state newstate)
  169. {
  170. const char *str;
  171. if (device->tape_state == TS_NOT_OPER) {
  172. DBF_EVENT(3, "ts_set err: not oper\n");
  173. return;
  174. }
  175. DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
  176. if (device->tape_state < TO_SIZE && device->tape_state >= 0)
  177. str = tape_state_verbose[device->tape_state];
  178. else
  179. str = "UNKNOWN TS";
  180. DBF_EVENT(4, "old ts: %s\n", str);
  181. if (device->tape_state < TO_SIZE && device->tape_state >=0 )
  182. str = tape_state_verbose[device->tape_state];
  183. else
  184. str = "UNKNOWN TS";
  185. DBF_EVENT(4, "%s\n", str);
  186. DBF_EVENT(4, "new ts:\t\n");
  187. if (newstate < TO_SIZE && newstate >= 0)
  188. str = tape_state_verbose[newstate];
  189. else
  190. str = "UNKNOWN TS";
  191. DBF_EVENT(4, "%s\n", str);
  192. device->tape_state = newstate;
  193. wake_up(&device->state_change_wq);
  194. }
  195. void
  196. tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
  197. {
  198. if (device->medium_state == newstate)
  199. return;
  200. switch(newstate){
  201. case MS_UNLOADED:
  202. device->tape_generic_status |= GMT_DR_OPEN(~0);
  203. PRINT_INFO("(%s): Tape is unloaded\n",
  204. device->cdev->dev.bus_id);
  205. break;
  206. case MS_LOADED:
  207. device->tape_generic_status &= ~GMT_DR_OPEN(~0);
  208. PRINT_INFO("(%s): Tape has been mounted\n",
  209. device->cdev->dev.bus_id);
  210. break;
  211. default:
  212. // print nothing
  213. break;
  214. }
  215. device->medium_state = newstate;
  216. wake_up(&device->state_change_wq);
  217. }
  218. /*
  219. * Stop running ccw. Has to be called with the device lock held.
  220. */
  221. static inline int
  222. __tape_halt_io(struct tape_device *device, struct tape_request *request)
  223. {
  224. int retries;
  225. int rc;
  226. /* Check if interrupt has already been processed */
  227. if (request->callback == NULL)
  228. return 0;
  229. rc = 0;
  230. for (retries = 0; retries < 5; retries++) {
  231. rc = ccw_device_clear(device->cdev, (long) request);
  232. if (rc == 0) { /* Termination successful */
  233. request->rc = -EIO;
  234. request->status = TAPE_REQUEST_DONE;
  235. return 0;
  236. }
  237. if (rc == -ENODEV)
  238. DBF_EXCEPTION(2, "device gone, retry\n");
  239. else if (rc == -EIO)
  240. DBF_EXCEPTION(2, "I/O error, retry\n");
  241. else if (rc == -EBUSY)
  242. DBF_EXCEPTION(2, "device busy, retry late\n");
  243. else
  244. BUG();
  245. }
  246. return rc;
  247. }
  248. /*
  249. * Add device into the sorted list, giving it the first
  250. * available minor number.
  251. */
  252. static int
  253. tape_assign_minor(struct tape_device *device)
  254. {
  255. struct tape_device *tmp;
  256. int minor;
  257. minor = 0;
  258. write_lock(&tape_device_lock);
  259. list_for_each_entry(tmp, &tape_device_list, node) {
  260. if (minor < tmp->first_minor)
  261. break;
  262. minor += TAPE_MINORS_PER_DEV;
  263. }
  264. if (minor >= 256) {
  265. write_unlock(&tape_device_lock);
  266. return -ENODEV;
  267. }
  268. device->first_minor = minor;
  269. list_add_tail(&device->node, &tmp->node);
  270. write_unlock(&tape_device_lock);
  271. return 0;
  272. }
/* Remove the device from the sorted device list and release its minor
 * number; first_minor == -1 marks the device as offline afterwards. */
static void
tape_remove_minor(struct tape_device *device)
{
        write_lock(&tape_device_lock);
        list_del_init(&device->node);
        device->first_minor = -1;
        write_unlock(&tape_device_lock);
}
  282. /*
  283. * Set a device online.
  284. *
  285. * This function is called by the common I/O layer to move a device from the
  286. * detected but offline into the online state.
  287. * If we return an error (RC < 0) the device remains in the offline state. This
  288. * can happen if the device is assigned somewhere else, for example.
  289. */
  290. int
  291. tape_generic_online(struct tape_device *device,
  292. struct tape_discipline *discipline)
  293. {
  294. int rc;
  295. DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
  296. if (device->tape_state != TS_INIT) {
  297. DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
  298. return -EINVAL;
  299. }
  300. /* Let the discipline have a go at the device. */
  301. device->discipline = discipline;
  302. if (!try_module_get(discipline->owner)) {
  303. PRINT_ERR("Cannot get module. Module gone.\n");
  304. return -EINVAL;
  305. }
  306. rc = discipline->setup_device(device);
  307. if (rc)
  308. goto out;
  309. rc = tape_assign_minor(device);
  310. if (rc)
  311. goto out_discipline;
  312. rc = tapechar_setup_device(device);
  313. if (rc)
  314. goto out_minor;
  315. rc = tapeblock_setup_device(device);
  316. if (rc)
  317. goto out_char;
  318. tape_state_set(device, TS_UNUSED);
  319. DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
  320. return 0;
  321. out_char:
  322. tapechar_cleanup_device(device);
  323. out_discipline:
  324. device->discipline->cleanup_device(device);
  325. device->discipline = NULL;
  326. out_minor:
  327. tape_remove_minor(device);
  328. out:
  329. module_put(discipline->owner);
  330. return rc;
  331. }
/* Tear down a fully set-up device: block and char interfaces, then the
 * discipline (dropping its module reference), then the minor number.
 * Order is the exact reverse of tape_generic_online(). */
static inline void
tape_cleanup_device(struct tape_device *device)
{
        tapeblock_cleanup_device(device);
        tapechar_cleanup_device(device);
        device->discipline->cleanup_device(device);
        module_put(device->discipline->owner);
        tape_remove_minor(device);
        tape_med_state_set(device, MS_UNKNOWN);
}
/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct tape_device *device)
{
        if (!device) {
                PRINT_ERR("tape_generic_offline: no such device\n");
                return -ENODEV;
        }

        DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
                device->cdev_id, device);

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        switch (device->tape_state) {
        case TS_INIT:
        case TS_NOT_OPER:
                /* Not (fully) online - nothing to tear down. */
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                break;
        case TS_UNUSED:
                /* Flip back to TS_INIT under the lock, then release the
                 * lock before running the teardown. */
                tape_state_set(device, TS_INIT);
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                tape_cleanup_device(device);
                break;
        default:
                /* Drive is in use - refuse the offline request. */
                DBF_EVENT(3, "(%08x): Set offline failed "
                        "- drive in use.\n",
                        device->cdev_id);
                PRINT_WARN("(%s): Set offline failed "
                        "- drive in use.\n",
                        device->cdev->dev.bus_id);
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                return -EBUSY;
        }

        DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
        return 0;
}
  382. /*
  383. * Allocate memory for a new device structure.
  384. */
  385. static struct tape_device *
  386. tape_alloc_device(void)
  387. {
  388. struct tape_device *device;
  389. device = (struct tape_device *)
  390. kmalloc(sizeof(struct tape_device), GFP_KERNEL);
  391. if (device == NULL) {
  392. DBF_EXCEPTION(2, "ti:no mem\n");
  393. PRINT_INFO ("can't allocate memory for "
  394. "tape info structure\n");
  395. return ERR_PTR(-ENOMEM);
  396. }
  397. memset(device, 0, sizeof(struct tape_device));
  398. device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
  399. if (device->modeset_byte == NULL) {
  400. DBF_EXCEPTION(2, "ti:no mem\n");
  401. PRINT_INFO("can't allocate memory for modeset byte\n");
  402. kfree(device);
  403. return ERR_PTR(-ENOMEM);
  404. }
  405. INIT_LIST_HEAD(&device->req_queue);
  406. INIT_LIST_HEAD(&device->node);
  407. init_waitqueue_head(&device->state_change_wq);
  408. device->tape_state = TS_INIT;
  409. device->medium_state = MS_UNKNOWN;
  410. *device->modeset_byte = 0;
  411. device->first_minor = -1;
  412. atomic_set(&device->ref_count, 1);
  413. return device;
  414. }
/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.  Returns the same pointer for caller
 * convenience (assignment chaining).
 */
struct tape_device *
tape_get_device_reference(struct tape_device *device)
{
        DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
                atomic_inc_return(&device->ref_count));

        return device;
}
  426. /*
  427. * Decrease the reference counter of a devices structure. If the
  428. * reference counter reaches zero free the device structure.
  429. * The function returns a NULL pointer to be used by the caller
  430. * for clearing reference pointers.
  431. */
  432. struct tape_device *
  433. tape_put_device(struct tape_device *device)
  434. {
  435. int remain;
  436. remain = atomic_dec_return(&device->ref_count);
  437. if (remain > 0) {
  438. DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
  439. } else {
  440. if (remain < 0) {
  441. DBF_EVENT(4, "put device without reference\n");
  442. PRINT_ERR("put device without reference\n");
  443. } else {
  444. DBF_EVENT(4, "tape_free_device(%p)\n", device);
  445. kfree(device->modeset_byte);
  446. kfree(device);
  447. }
  448. }
  449. return NULL;
  450. }
/*
 * Find tape device by a device index and take a reference on it.
 * Returns ERR_PTR(-ENODEV) if no device with that index exists.
 */
struct tape_device *
tape_get_device(int devindex)
{
        struct tape_device *device, *tmp;

        device = ERR_PTR(-ENODEV);
        read_lock(&tape_device_lock);
        /* Each device owns TAPE_MINORS_PER_DEV consecutive minors, so
         * the index is first_minor / TAPE_MINORS_PER_DEV. */
        list_for_each_entry(tmp, &tape_device_list, node) {
                if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
                        device = tape_get_device_reference(tmp);
                        break;
                }
        }
        read_unlock(&tape_device_lock);
        return device;
}
/*
 * Driverfs tape probe function: allocate the device structure, wire it
 * to the ccw device and publish the sysfs attribute group.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
        struct tape_device *device;

        device = tape_alloc_device();
        if (IS_ERR(device))
                return -ENODEV;
        PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
        cdev->dev.driver_data = device;
        device->cdev = cdev;
        device->cdev_id = busid_to_int(cdev->dev.bus_id);
        cdev->handler = __tape_do_irq;

        ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
        /* NOTE(review): return values of ccw_device_set_options and
         * sysfs_create_group are ignored here - confirm that is intended. */
        sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);

        return 0;
}
/*
 * Fail every request still on the queue with -EIO and run its
 * callback.  Used when the device vanishes and no interrupts will
 * arrive for the outstanding requests anymore.
 */
static inline void
__tape_discard_requests(struct tape_device *device)
{
        struct tape_request *  request;
        struct list_head *     l, *n;

        list_for_each_safe(l, n, &device->req_queue) {
                request = list_entry(l, struct tape_request, list);
                if (request->status == TAPE_REQUEST_IN_IO)
                        request->status = TAPE_REQUEST_DONE;
                list_del(&request->list);

                /* Decrease ref_count for removed request. */
                request->device = tape_put_device(device);
                request->rc = -EIO;

                if (request->callback != NULL)
                        request->callback(request, request->callback_data);
        }
}
/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
        struct tape_device *    device;

        device = cdev->dev.driver_data;
        if (!device) {
                PRINT_ERR("No device pointer in tape_generic_remove!\n");
                return;
        }
        DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        switch (device->tape_state) {
        case TS_INIT:
                tape_state_set(device, TS_NOT_OPER);
                /* fallthrough - TS_INIT only needs the state change */
        case TS_NOT_OPER:
                /*
                 * Nothing to do.
                 */
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                break;
        case TS_UNUSED:
                /*
                 * Need only to release the device.
                 */
                tape_state_set(device, TS_NOT_OPER);
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                tape_cleanup_device(device);
                break;
        default:
                /*
                 * There may be requests on the queue. We will not get
                 * an interrupt for a request that was running. So we
                 * just post them all as I/O errors.
                 */
                DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
                        device->cdev_id);
                PRINT_WARN("(%s): Drive in use vanished - "
                        "expect trouble!\n",
                        device->cdev->dev.bus_id);
                PRINT_WARN("State was %i\n", device->tape_state);
                tape_state_set(device, TS_NOT_OPER);
                __tape_discard_requests(device);
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                tape_cleanup_device(device);
        }

        /* Drop the probe-time reference and the sysfs attributes. */
        if (cdev->dev.driver_data != NULL) {
                sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
                cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
        }
}
  561. /*
  562. * Allocate a new tape ccw request
  563. */
  564. struct tape_request *
  565. tape_alloc_request(int cplength, int datasize)
  566. {
  567. struct tape_request *request;
  568. if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
  569. BUG();
  570. DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
  571. request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
  572. GFP_KERNEL);
  573. if (request == NULL) {
  574. DBF_EXCEPTION(1, "cqra nomem\n");
  575. return ERR_PTR(-ENOMEM);
  576. }
  577. memset(request, 0, sizeof(struct tape_request));
  578. /* allocate channel program */
  579. if (cplength > 0) {
  580. request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
  581. GFP_ATOMIC | GFP_DMA);
  582. if (request->cpaddr == NULL) {
  583. DBF_EXCEPTION(1, "cqra nomem\n");
  584. kfree(request);
  585. return ERR_PTR(-ENOMEM);
  586. }
  587. memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
  588. }
  589. /* alloc small kernel buffer */
  590. if (datasize > 0) {
  591. request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
  592. if (request->cpdata == NULL) {
  593. DBF_EXCEPTION(1, "cqra nomem\n");
  594. if (request->cpaddr != NULL)
  595. kfree(request->cpaddr);
  596. kfree(request);
  597. return ERR_PTR(-ENOMEM);
  598. }
  599. memset(request->cpdata, 0, datasize);
  600. }
  601. DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
  602. request->cpdata);
  603. return request;
  604. }
  605. /*
  606. * Free tape ccw request
  607. */
  608. void
  609. tape_free_request (struct tape_request * request)
  610. {
  611. DBF_LH(6, "Free request %p\n", request);
  612. if (request->device != NULL) {
  613. request->device = tape_put_device(request->device);
  614. }
  615. if (request->cpdata != NULL)
  616. kfree(request->cpdata);
  617. if (request->cpaddr != NULL)
  618. kfree(request->cpaddr);
  619. kfree(request);
  620. }
/*
 * Start the next runnable request from the queue.  Called with the
 * device lock held.  Requests that fail to start are completed with
 * the start error and removed; iteration stops at the first request
 * that starts successfully.
 */
static inline void
__tape_do_io_list(struct tape_device *device)
{
        struct list_head *l, *n;
        struct tape_request *request;
        int rc;

        DBF_LH(6, "__tape_do_io_list(%p)\n", device);
        /*
         * Try to start each request on request queue until one is
         * started successful.
         */
        list_for_each_safe(l, n, &device->req_queue) {
                request = list_entry(l, struct tape_request, list);
#ifdef CONFIG_S390_TAPE_BLOCK
                if (request->op == TO_BLOCK)
                        device->discipline->check_locate(device, request);
#endif
                rc = ccw_device_start(device->cdev, request->cpaddr,
                                      (unsigned long) request, 0x00,
                                      request->options);
                if (rc == 0) {
                        request->status = TAPE_REQUEST_IN_IO;
                        break;
                }
                /* Start failed. Remove request and indicate failure. */
                DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);

                /* Set ending status and do callback. */
                request->rc = rc;
                request->status = TAPE_REQUEST_DONE;
                __tape_remove_request(device, request);
        }
}
/*
 * Remove a finished request from the queue, run its completion
 * callback, and kick off the next queued request (if any).
 */
static void
__tape_remove_request(struct tape_device *device, struct tape_request *request)
{
        /* Remove from request queue. */
        list_del(&request->list);

        /* Do callback. */
        if (request->callback != NULL)
                request->callback(request, request->callback_data);

        /* Start next request. */
        if (!list_empty(&device->req_queue))
                __tape_do_io_list(device);
}
/*
 * Write sense data to console/dbf: device/channel status, the failed
 * operation (if a request is given) and the 32 bytes of sense data
 * from the irb's extended control word.
 */
void
tape_dump_sense(struct tape_device* device, struct tape_request *request,
                struct irb *irb)
{
        unsigned int *sptr;

        PRINT_INFO("-------------------------------------------------\n");
        PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
                irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
        PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
        if (request != NULL)
                PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);

        sptr = (unsigned int *) irb->ecw;
        PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
                sptr[0], sptr[1], sptr[2], sptr[3]);
        PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
                sptr[4], sptr[5], sptr[6], sptr[7]);
        PRINT_INFO("--------------------------------------------------\n");
}
  686. /*
  687. * Write sense data to dbf
  688. */
  689. void
  690. tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
  691. struct irb *irb)
  692. {
  693. unsigned int *sptr;
  694. const char* op;
  695. if (request != NULL)
  696. op = tape_op_verbose[request->op];
  697. else
  698. op = "---";
  699. DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
  700. irb->scsw.dstat,irb->scsw.cstat);
  701. DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
  702. sptr = (unsigned int *) irb->ecw;
  703. DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
  704. DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
  705. DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
  706. DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
  707. }
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static inline int
__tape_do_io(struct tape_device *device, struct tape_request *request)
{
        int rc;

        /* Check whether the operation is allowed in the current state. */
        switch (request->op) {
        case TO_MSEN:
        case TO_ASSIGN:
        case TO_UNASSIGN:
        case TO_READ_ATTMSG:
                /* These ops are also valid before the drive is opened. */
                if (device->tape_state == TS_INIT)
                        break;
                if (device->tape_state == TS_UNUSED)
                        break;
                /* fallthrough - otherwise apply the default check */
        default:
                if (device->tape_state == TS_BLKUSE)
                        break;
                if (device->tape_state != TS_IN_USE)
                        return -ENODEV;
        }

        /* Increase use count of device for the added request. */
        request->device = tape_get_device_reference(device);

        if (list_empty(&device->req_queue)) {
                /* No other requests are on the queue. Start this one. */
#ifdef CONFIG_S390_TAPE_BLOCK
                if (request->op == TO_BLOCK)
                        device->discipline->check_locate(device, request);
#endif
                rc = ccw_device_start(device->cdev, request->cpaddr,
                                      (unsigned long) request, 0x00,
                                      request->options);
                if (rc) {
                        DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
                        return rc;
                }
                DBF_LH(5, "Request %p added for execution.\n", request);
                list_add(&request->list, &device->req_queue);
                request->status = TAPE_REQUEST_IN_IO;
        } else {
                /* Tape is busy - just queue behind the others. */
                DBF_LH(5, "Request %p add to queue.\n", request);
                list_add_tail(&request->list, &device->req_queue);
                request->status = TAPE_REQUEST_QUEUED;
        }
        return 0;
}
/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
        int rc;

        DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Add request to request queue and try to start it. */
        rc = __tape_do_io(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}
/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptible for its completion.
 *
 * Completion callback for tape_do_io(): clearing request->callback
 * is the completion flag the waiter tests.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
        request->callback = NULL;
        wake_up((wait_queue_head_t *) data);
}
/*
 * Queue a request and wait uninterruptibly for its completion.
 * Returns the start error, or the request's final rc on completion.
 */
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
        wait_queue_head_t wq;
        int rc;

        init_waitqueue_head(&wq);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Setup callback */
        request->callback = __tape_wake_up;
        request->callback_data = &wq;
        /* Add request to request queue and try to start it. */
        rc = __tape_do_io(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        if (rc)
                return rc;
        /* Request added to the queue. Wait for its completion. */
        wait_event(wq, (request->callback == NULL));
        /* Get rc from request */
        return request->rc;
}
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptible for its completion.
 *
 * Completion callback; clearing request->callback signals completion
 * to the interruptible waiter.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
        request->callback = NULL;
        wake_up_interruptible((wait_queue_head_t *) data);
}
/*
 * Queue a request and wait interruptibly for its completion.  When a
 * signal arrives the running ccw is cancelled via __tape_halt_io() and
 * -ERESTARTSYS is returned.
 */
int
tape_do_io_interruptible(struct tape_device *device,
                         struct tape_request *request)
{
        wait_queue_head_t wq;
        int rc;

        init_waitqueue_head(&wq);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Setup callback */
        request->callback = __tape_wake_up_interruptible;
        request->callback_data = &wq;
        rc = __tape_do_io(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        if (rc)
                return rc;
        /* Request added to the queue. Wait for its completion. */
        rc = wait_event_interruptible(wq, (request->callback == NULL));
        if (rc != -ERESTARTSYS)
                /* Request finished normally. */
                return request->rc;
        /* Interrupted by a signal. We have to stop the current request. */
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = __tape_halt_io(device, request);
        if (rc == 0) {
                DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
                rc = -ERESTARTSYS;
        }
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}
  844. /*
  845. * Handle requests that return an i/o error in the irb.
  846. */
  847. static inline void
  848. tape_handle_killed_request(
  849. struct tape_device *device,
  850. struct tape_request *request)
  851. {
  852. if(request != NULL) {
  853. /* Set ending status. FIXME: Should the request be retried? */
  854. request->rc = -EIO;
  855. request->status = TAPE_REQUEST_DONE;
  856. __tape_remove_request(device, request);
  857. } else {
  858. __tape_do_io_list(device);
  859. }
  860. }
  861. /*
  862. * Tape interrupt routine, called from the ccw_device layer
  863. */
/*
 * Tape interrupt routine, called from the ccw_device layer.
 * Dispatches the interrupt to the discipline's irq handler and
 * finishes, retries or halts the request according to its verdict.
 * cdev:    the channel device that raised the interrupt
 * intparm: the tape_request pointer passed to ccw_device_start,
 *          or NULL for an unsolicited interrupt
 * irb:     interrupt response block; may be an ERR_PTR value
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int final;
	int rc;
	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			"in interrupt\n", cdev->dev.bus_id);
		return;
	}
	request = (struct tape_request *) intparm;
	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			PRINT_WARN("(%s): Request timed out\n",
				cdev->dev.bus_id);
			/* fallthrough - a timed out request is terminated
			 * like a killed one */
		case -EIO:
			tape_handle_killed_request(device, request);
			break;
		default:
			PRINT_ERR("(%s): Unexpected i/o error %li\n",
				cdev->dev.bus_id,
				PTR_ERR(irb));
		}
		return;
	}
	/* May be an unsolicited irq */
	if(request != NULL)
		request->rescnt = irb->scsw.count;
	/* 0x0c is channel end + device end (see comment below). */
	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);
		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}
	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to
	 * TAPE_REQUEST_DONE.
	 */
	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_remove_request(device, request);
		return;
	}
	/* Let the discipline decide what to do with the interrupt. */
	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	final = 0;
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		final = 1;
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_RETRY:
#ifdef CONFIG_S390_TAPE_BLOCK
		if (request->op == TO_BLOCK)
			device->discipline->check_locate(device, request);
#endif
		/* Restart the channel program for this request. */
		rc = ccw_device_start(cdev, request->cpaddr,
			(unsigned long) request, 0x00,
			request->options);
		if (rc) {
			DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
			final = 1;
		}
		break;
	case TAPE_IO_STOP:
		__tape_halt_io(device, request);
		break;
	default:
		if (rc > 0) {
			/* Positive rc other than the TAPE_IO_* codes is a
			 * discipline bug; map it to -EIO. */
			DBF_EVENT(6, "xunknownrc\n");
			PRINT_ERR("Invalid return code from discipline "
				"interrupt function.\n");
			rc = -EIO;
		}
		final = 1;
		break;
	}
	if (final) {
		/* May be an unsolicited irq */
		if(request != NULL) {
			/* Set ending status. */
			request->rc = rc;
			request->status = TAPE_REQUEST_DONE;
			__tape_remove_request(device, request);
		} else {
			__tape_do_io_list(device);
		}
	}
}
  981. /*
  982. * Tape device open function used by tape_char & tape_block frontends.
  983. */
  984. int
  985. tape_open(struct tape_device *device)
  986. {
  987. int rc;
  988. spin_lock(get_ccwdev_lock(device->cdev));
  989. if (device->tape_state == TS_NOT_OPER) {
  990. DBF_EVENT(6, "TAPE:nodev\n");
  991. rc = -ENODEV;
  992. } else if (device->tape_state == TS_IN_USE) {
  993. DBF_EVENT(6, "TAPE:dbusy\n");
  994. rc = -EBUSY;
  995. } else if (device->tape_state == TS_BLKUSE) {
  996. DBF_EVENT(6, "TAPE:dbusy\n");
  997. rc = -EBUSY;
  998. } else if (device->discipline != NULL &&
  999. !try_module_get(device->discipline->owner)) {
  1000. DBF_EVENT(6, "TAPE:nodisc\n");
  1001. rc = -ENODEV;
  1002. } else {
  1003. tape_state_set(device, TS_IN_USE);
  1004. rc = 0;
  1005. }
  1006. spin_unlock(get_ccwdev_lock(device->cdev));
  1007. return rc;
  1008. }
  1009. /*
  1010. * Tape device release function used by tape_char & tape_block frontends.
  1011. */
  1012. int
  1013. tape_release(struct tape_device *device)
  1014. {
  1015. spin_lock(get_ccwdev_lock(device->cdev));
  1016. if (device->tape_state == TS_IN_USE)
  1017. tape_state_set(device, TS_UNUSED);
  1018. module_put(device->discipline->owner);
  1019. spin_unlock(get_ccwdev_lock(device->cdev));
  1020. return 0;
  1021. }
  1022. /*
  1023. * Execute a magnetic tape command a number of times.
  1024. */
  1025. int
  1026. tape_mtop(struct tape_device *device, int mt_op, int mt_count)
  1027. {
  1028. tape_mtop_fn fn;
  1029. int rc;
  1030. DBF_EVENT(6, "TAPE:mtio\n");
  1031. DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
  1032. DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
  1033. if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
  1034. return -EINVAL;
  1035. fn = device->discipline->mtop_array[mt_op];
  1036. if (fn == NULL)
  1037. return -EINVAL;
  1038. /* We assume that the backends can handle count up to 500. */
  1039. if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
  1040. mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
  1041. rc = 0;
  1042. for (; mt_count > 500; mt_count -= 500)
  1043. if ((rc = fn(device, 500)) != 0)
  1044. break;
  1045. if (rc == 0)
  1046. rc = fn(device, mt_count);
  1047. } else
  1048. rc = fn(device, mt_count);
  1049. return rc;
  1050. }
  1051. /*
  1052. * Tape init function.
  1053. */
  1054. static int
  1055. tape_init (void)
  1056. {
  1057. TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
  1058. debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
  1059. #ifdef DBF_LIKE_HELL
  1060. debug_set_level(TAPE_DBF_AREA, 6);
  1061. #endif
  1062. DBF_EVENT(3, "tape init: ($Revision: 1.51 $)\n");
  1063. tape_proc_init();
  1064. tapechar_init ();
  1065. tapeblock_init ();
  1066. return 0;
  1067. }
  1068. /*
  1069. * Tape exit function.
  1070. */
/*
 * Tape exit function.
 * Teardown order mirrors tape_init in reverse: the frontends go
 * first, then the proc interface, and the debug area last so DBF
 * logging stays usable during cleanup.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");
	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}
/* Module metadata. */
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	"Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached "
	"tape device driver ($Revision: 1.51 $)");
MODULE_LICENSE("GPL");
module_init(tape_init);
module_exit(tape_exit);
/* Interface used by the discipline and frontend (char/block) modules. */
EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
/* Device reference counting. */
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
/* State helpers and debugging aids. */
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
/* Request submission entry points. */
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_mtop);