i2o_block.c

/*
 * Block OSM
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such
 * as gzipped tar and not one requiring a proprietary or patent
 * encumbered tool to unpack.
 *
 * Fixes/additions:
 *	Steve Ralston:
 *		Multiple device handling error fixes,
 *		Added a queue depth.
 *	Alan Cox:
 *		FC920 has an rmw bug. Don't OR in the end marker.
 *		Removed queue walk, fixed for 64-bitness.
 *		Rewrote much of the code over time
 *		Added indirect block lists
 *		Handle 64K limits on many controllers
 *		Don't use indirects on the Promise (breaks)
 *		Heavily chop down the queue depths
 *	Deepak Saxena:
 *		Independent queues per IOP
 *		Support for dynamic device creation/deletion
 *		Code cleanup
 *		Support for larger I/Os through merge* functions
 *		(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda).
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2o.h>
#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.325"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};
/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
}
/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
}
/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(60 << 16);	/* 60 second timeout */
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, msg, 60);
}
/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load a media into drive. Identifier should be set to -1, because the
 * spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	msg->body[1] = cpu_to_le32(0x00000000);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
}
/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
}
/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 |
			dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(media_id);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
}
/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation to send
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message *msg;
	int rc;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
			i2o_dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(op << 24);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, msg, 60);
	if (!rc)
		dev->power = op;

	return rc;
}
/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);
	sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

	return ireq;
}
/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
}
/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 * @mptr: message body pointer
 *
 * Builds the SG list and maps it so it is accessible by the controller.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
					 u32 **mptr)
{
	int nents;
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
}
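/*
 * As used by i2o_block_transfer(): on success the SG entries have been
 * appended at *mptr and mptr has been advanced past them, which is how
 * the caller later derives the final message size for msg->u.head[0].
 */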
/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
}
/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");
		return BLKPREP_KILL;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();
		if (IS_ERR(ireq)) {
			osm_debug("unable to allocate i2o_block_request!\n");
			return BLKPREP_DEFER;
		}

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	}

	/* do not come back here */
	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
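/*
 * Lifecycle note: the i2o_block_request hooked into req->special here
 * travels with the request and is returned to the mempool in
 * i2o_block_end_request() once the controller's reply has been handled.
 */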
/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @work: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_RETRY_TIME. Otherwise the queue will never
 * be started again.
 */
static void i2o_block_delayed_request_fn(struct work_struct *work)
{
	struct i2o_block_delayed_request *dreq =
		container_of(work, struct i2o_block_delayed_request,
			     work.work);
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
}
/**
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
 */
static void i2o_block_end_request(struct request *req, int error,
				  int nr_bytes)
{
	struct i2o_block_request *ireq = req->special;
	struct i2o_block_device *dev = ireq->i2o_blk_dev;
	struct request_queue *q = req->q;
	unsigned long flags;

	if (blk_end_request(req, error, nr_bytes))
		if (error)
			blk_end_request_all(req, -EIO);

	spin_lock_irqsave(q->queue_lock, flags);

	if (likely(dev)) {
		dev->open_queue_depth--;
		list_del(&ireq->queue);
	}

	blk_start_queue(q);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
}
/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct request *req;
	int error = 0;

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	/*
	 * Lets see what is cooking. We stuffed the
	 * request in the context.
	 */
	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);

		/*
		 * Device not ready means two things. One is that the
		 * device went offline (but not removable media).
		 *
		 * The second is that you have a SuperTrak 100 and the
		 * firmware got constipated. Unlike standard i2o card
		 * setups the supertrak returns an error rather than
		 * blocking for the timeout in these cases.
		 *
		 * Don't stick a supertrak100 into cache aggressive modes
		 */
		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

		req->errors++;

		error = -EIO;
	}

	i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));

	return 1;
}
static void i2o_block_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);
	osm_debug("event received\n");
	kfree(evt);
}
/*
 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 * perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size        | Sectors | Heads | Cylinders
 * -----------------+---------+-------+---------------------
 *        X <= 528M |      63 |    16 | X/(63 * 16 * 512)
 * 528M < X <= 1G   |      63 |    32 | X/(63 * 32 * 512)
 * 1G   < X <= 2.1G |      63 |    64 | X/(63 * 64 * 512)
 * 2.1G < X <= 4.2G |      63 |   128 | X/(63 * 128 * 512)
 *        X >  4.2G |      63 |   255 | X/(63 * 255 * 512)
 */

/* capacity limits, expressed in 512-byte sectors */
#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800
static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}
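/*
 * Worked example (illustration only): a 1 GiB disk has a capacity of
 * 2097152 sectors, which falls into the 32-head bucket above:
 *
 *	unsigned short cyls;
 *	unsigned char hds, secs;
 *
 *	i2o_block_biosparam(2097152, &cyls, &hds, &secs);
 *	// cyls == 2097152 / (32 * 63) == 1040, hds == 32, secs == 63
 */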
/**
 * i2o_block_open - Open the block device
 * @bdev: block device being opened
 * @mode: file open mode
 *
 * Power up the device, mount and lock the media. This function is called
 * when the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct block_device *bdev, fmode_t mode)
{
	struct i2o_block_device *dev = bdev->bd_disk->private_data;

	if (!dev->i2o_dev)
		return -ENODEV;

	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");

	return 0;
}
/**
 * i2o_block_release - Release the I2O block device
 * @disk: gendisk device being released
 * @mode: file open mode
 *
 * Unlock and unmount the media, and power down the device. Gets called if
 * the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct gendisk *disk, fmode_t mode)
{
	struct i2o_block_device *dev = disk->private_data;
	u8 operation;

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it. ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if (!dev->i2o_dev)
		return 0;

	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
		operation = 0x21;
	else
		operation = 0x24;

	i2o_block_device_power(dev, operation);

	return 0;
}
static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	i2o_block_biosparam(get_capacity(bdev->bd_disk),
			    &geo->cylinders, &geo->heads, &geo->sectors);
	return 0;
}
/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @bdev: block device being opened
 * @mode: file open mode
 * @cmd: ioctl command
 * @arg: arg
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BLKI2OGRSTRAT:
		return put_user(dev->rcache, (int __user *)arg);
	case BLKI2OGWSTRAT:
		return put_user(dev->wcache, (int __user *)arg);
	case BLKI2OSRSTRAT:
		if (arg < 0 || arg > CACHE_SMARTFETCH)
			return -EINVAL;
		dev->rcache = arg;
		break;
	case BLKI2OSWSTRAT:
		if (arg != 0
		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
			return -EINVAL;
		dev->wcache = arg;
		break;
	}

	return -ENOTTY;
}
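/*
 * Usage sketch (illustrative, assuming the BLKI2O* numbers and the cache
 * strategy constants are visible to user space via i2o_block.h and that
 * the disk node is /dev/i2o/hda):
 *
 *	int fd = open("/dev/i2o/hda", O_RDONLY);
 *	int rcache;
 *
 *	if (ioctl(fd, BLKI2OGRSTRAT, &rcache) == 0)
 *		printf("read cache strategy: %d\n", rcache);
 *	ioctl(fd, BLKI2OSRSTRAT, CACHE_PREFETCH);  // needs CAP_SYS_ADMIN
 *	close(fd);
 *
 * Note that, as written, the set paths above fall through to the final
 * return -ENOTTY even when the new strategy was stored.
 */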
/**
 * i2o_block_media_changed - Have we seen a media change?
 * @disk: gendisk which should be verified
 *
 * Verifies if the media has changed.
 *
 * Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return 1;
	}
	return 0;
}
/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and after everything is set up the message
 * is posted to the I2O controller. No cleanup is done by this function.
 * It is done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c;
	u32 tid;
	struct i2o_message *msg;
	u32 *mptr;
	struct i2o_block_request *ireq = req->special;
	u32 tcntxt;
	u32 sgl_offset = SGL_OFFSET_8;
	u32 ctl_flags = 0x00000000;
	int rc;
	u32 cmd;

	if (unlikely(!dev->i2o_dev)) {
		osm_err("transfer to removed drive\n");
		rc = -ENODEV;
		goto exit;
	}

	/* only dereference i2o_dev after the NULL check above */
	tid = dev->i2o_dev->lct_data.tid;
	c = dev->i2o_dev->iop;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg)) {
		rc = PTR_ERR(msg);
		goto exit;
	}

	tcntxt = i2o_cntxt_list_add(c, req);
	if (!tcntxt) {
		rc = -ENOMEM;
		goto nop_msg;
	}

	msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);

	mptr = &msg->body[0];

	if (rq_data_dir(req) == READ) {
		cmd = I2O_CMD_BLOCK_READ << 24;

		switch (dev->rcache) {
		case CACHE_PREFETCH:
			ctl_flags = 0x201F0008;
			break;

		case CACHE_SMARTFETCH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x201F0008;
			else
				ctl_flags = 0x001F0000;
			break;

		default:
			break;
		}
	} else {
		cmd = I2O_CMD_BLOCK_WRITE << 24;

		switch (dev->wcache) {
		case CACHE_WRITETHROUGH:
			ctl_flags = 0x001F0008;
			break;

		case CACHE_WRITEBACK:
			ctl_flags = 0x001F0010;
			break;

		case CACHE_SMARTBACK:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;

		case CACHE_SMARTTHROUGH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;

		default:
			break;
		}
	}

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec) {
		u8 cmd[10];
		u32 scsi_flags;
		u16 hwsec;

		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
		memset(cmd, 0, 10);

		sgl_offset = SGL_OFFSET_12;

		msg->u.head[1] =
		    cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);

		*mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
		*mptr++ = cpu_to_le32(tid);

		/*
		 * ENABLE_DISCONNECT
		 * SIMPLE_TAG
		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
		 */
		if (rq_data_dir(req) == READ) {
			cmd[0] = READ_10;
			scsi_flags = 0x60a0000a;
		} else {
			cmd[0] = WRITE_10;
			scsi_flags = 0xa0a0000a;
		}

		*mptr++ = cpu_to_le32(scsi_flags);

		*((u32 *)&cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
		*((u16 *)&cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);

		memcpy(mptr, cmd, 10);
		mptr += 4;
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
	} else
#endif
	{
		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
		*mptr++ = cpu_to_le32(ctl_flags);
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
		/* 64-bit byte offset of the transfer, split into two words */
		*mptr++ =
		    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
		*mptr++ =
		    cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
	}

	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
		rc = -ENOMEM;
		goto context_remove;
	}

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, msg);

	return 0;

context_remove:
	i2o_cntxt_list_remove(c, req);

nop_msg:
	i2o_msg_nop(c, msg);

exit:
	return rc;
}
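/*
 * Layout sketch of the generic (non-Adaptec) message built above, for
 * reference; the word numbering is an informal reading of the code, not
 * quoted from the I2O spec:
 *
 *	head[0]     : message size | SGL offset (filled in last)
 *	head[1]     : I2O_CMD_BLOCK_READ/WRITE << 24 | HOST_TID << 12 | tid
 *	u.s.icntxt  : initiator context (Block OSM driver context)
 *	u.s.tcntxt  : transaction context (handle of the struct request)
 *	body[0]     : ctl_flags (cache hints)
 *	body[1]     : transfer size in bytes
 *	body[2]     : low 32 bits of the byte offset on the device
 *	body[3]     : high 32 bits of the byte offset
 *	body[4...]  : SG list appended by i2o_block_sglist_alloc()
 */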
/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and, if no error
 * occurs, dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs requeue the request.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = blk_peek_request(q);
		if (!req)
			break;

		if (blk_fs_request(req)) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blk_start_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_DELAYED_WORK(&dreq->work,
					  i2o_block_delayed_request_fn);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else {
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
		}
	}
}
/* I2O Block device operations definition */
static const struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.locked_ioctl = i2o_block_ioctl,
	.getgeo = i2o_block_getgeo,
	.media_changed = i2o_block_media_changed
};
/**
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue, and initialize them as far as possible without additional
 * information about the device.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

cleanup_queue:
	put_disk(gd);

cleanup_dev:
	kfree(dev);

exit:
	return ERR_PTR(rc);
}
/**
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 body_size = 4;
	u16 power;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_hw_sectors(queue, max_sectors);
	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

	/*
	 * Ask for the current media data. If that isn't supported
	 * then we ask for the device capacity data
	 */
	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		i2o_blk_dev->power = power;

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

claim_release:
	i2o_device_claim_release(i2o_dev);

exit:
	return rc;
}
/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		.probe = i2o_block_probe,
		.remove = i2o_block_remove,
	},
};
/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocates the slab and mempool for request structs, registers the
 * i2o_block block device and finally registers the Block OSM with the
 * I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool =
		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					 i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}
#ifdef MODULE
	osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

exit:
	return rc;
}
/**
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
}

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);
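/*
 * Usage sketch (assumes a standard modular build and a udev-style /dev):
 * loaded with "modprobe i2o_block", the OSM registers block major
 * I2O_MAJOR; claimed volumes then show up as /dev/i2o/hd* and in
 * /proc/partitions.
 */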