blkdev.h

#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/config.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>

#include <asm/scatterlist.h>

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	void (*dtor)(struct cfq_io_context *);
	void (*exit)(struct cfq_io_context *);

	struct io_context *ioc;

	/*
	 * circular list of cfq_io_contexts belonging to a process io context
	 */
	struct list_head list;
	struct cfq_queue *cfqq;
};

/*
 * This is the per-process I/O subsystem state. It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process io context
 * (apart from the atomic refcount), so require no locking.
 */
struct io_context {
	atomic_t refcount;
	pid_t pid;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests; /* Number of requests left in the batch */

	spinlock_t lock;

	struct as_io_context *aic;
	struct cfq_io_context *cic;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(int gfp_flags);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
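/*
 * Illustrative sketch, not part of the original header: the refcounting
 * discipline described above. get_io_context() returns a counted reference
 * to the current task's io_context and put_io_context() drops it;
 * copy_io_context() takes its own reference on the source. The helper
 * example_cache_ioc() and the cached pointer are hypothetical.
 */
#if 0
static struct io_context *example_cached_ioc;

static void example_cache_ioc(void)
{
	struct io_context *ioc = get_io_context(GFP_KERNEL);	/* +1 ref */

	copy_io_context(&example_cached_ioc, &ioc);	/* takes its own ref */
	put_io_context(ioc);				/* drop our temporary ref */
}
#endif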
struct request;
typedef void (rq_end_io_fn)(struct request *);

struct request_list {
	int count[2];
	int starved[2];
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
	wait_queue_head_t drain;
};

#define BLK_MAX_CDB 16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist; /* looking for ->queue? you must _not_
				     * access it directly, use
				     * blkdev_dequeue_request! */
	unsigned long flags; /* see REQ_ bits below */

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */
	sector_t sector; /* next sector to submit */
	unsigned long nr_sectors; /* no. of sectors left to submit */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	sector_t hard_sector; /* next sector to complete */
	unsigned long hard_nr_sectors; /* no. of sectors left to complete */
	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	void *elevator_private;

	int rq_status; /* should split this into a few status bits */
	struct gendisk *rq_disk;
	int errors;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	int tag;
	char *buffer;

	int ref_count;
	request_queue_t *q;
	struct request_list *rl;

	struct completion *waiting;
	void *special;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	void *data;

	unsigned int sense_len;
	void *sense;

	unsigned int timeout;

	/*
	 * For Power Management requests
	 */
	struct request_pm_state *pm;

	/*
	 * completion callback. end_io_data should be folded in with waiting
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};
/*
 * first three bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_CMD,		/* is a regular fs rw request */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	/*
	 * for ATA/ATAPI devices
	 */
	__REQ_PC,		/* packet command (special) */
	__REQ_BLOCK_PC,		/* queued down pc from block layer */
	__REQ_SENSE,		/* sense retrieval */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_SPECIAL,		/* driver supplied command */
	__REQ_DRIVE_CMD,
	__REQ_DRIVE_TASK,
	__REQ_DRIVE_TASKFILE,
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_PM_SUSPEND,	/* suspend request */
	__REQ_PM_RESUME,	/* resume request */
	__REQ_PM_SHUTDOWN,	/* shutdown request */
	__REQ_BAR_PREFLUSH,	/* barrier pre-flush done */
	__REQ_BAR_POSTFLUSH,	/* barrier post-flush */
	__REQ_BAR_FLUSH,	/* rq is the flush request */
	__REQ_NR_BITS,		/* stops here */
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_CMD (1 << __REQ_CMD)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_PC (1 << __REQ_PC)
#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
#define REQ_SENSE (1 << __REQ_SENSE)
#define REQ_FAILED (1 << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_SPECIAL (1 << __REQ_SPECIAL)
#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
#define REQ_DRIVE_TASKFILE (1 << __REQ_DRIVE_TASKFILE)
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH)
#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
#define REQ_BAR_FLUSH (1 << __REQ_BAR_FLUSH)

/*
 * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data; /* for driver use */
};
#include <linux/elevator.h>

typedef int (merge_request_fn) (request_queue_t *, struct request *,
				struct bio *);
typedef int (merge_requests_fn) (request_queue_t *, struct request *,
				 struct request *);
typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (end_flush_fn) (request_queue_t *, struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	atomic_t refcnt;		/* map can be shared */
};
struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	elevator_t *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	merge_request_fn *back_merge_fn;
	merge_request_fn *front_merge_fn;
	merge_requests_fn *merge_requests_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	merge_bvec_fn *merge_bvec_fn;
	activity_fn *activity_fn;
	issue_flush_fn *issue_flush_fn;
	prepare_flush_fn *prepare_flush_fn;
	end_flush_fn *end_flush_fn;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list unplug_timer;
	int unplug_thresh;		/* After this many requests */
	unsigned long unplug_delay;	/* After this many jiffies */
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;
	void *activity_data;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long bounce_pfn;
	unsigned int bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned short max_sectors;
	unsigned short max_hw_sectors;
	unsigned short max_phys_segments;
	unsigned short max_hw_segments;
	unsigned short hardsect_size;
	unsigned int max_segment_size;

	unsigned long seg_boundary_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;

	atomic_t refcnt;

	unsigned int in_flight;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;

	struct list_head drain_list;

	/*
	 * reserved for flush operations
	 */
	struct request *flush_rq;
	unsigned char ordered;
};

enum {
	QUEUE_ORDERED_NONE,
	QUEUE_ORDERED_TAG,
	QUEUE_ORDERED_FLUSH,
};
#define RQ_INACTIVE (-1)
#define RQ_ACTIVE 1
#define RQ_SCSI_BUSY 0xffff
#define RQ_SCSI_DONE 0xfffe
#define RQ_SCSI_DISCONNECTING 0xffe0

#define QUEUE_FLAG_CLUSTER 0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2	/* queue is stopped */
#define QUEUE_FLAG_READFULL 3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL 4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD 5	/* queue being torn down */
#define QUEUE_FLAG_REENTER 6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7	/* queue is plugged */
#define QUEUE_FLAG_DRAIN 8	/* draining queue for sched switch */
#define QUEUE_FLAG_FLUSH 9	/* doing barrier flush sequence */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q) test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)

#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
#define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->flags & REQ_STARTED)

#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME)
#define blk_pm_request(rq) \
	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))

#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH)
#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)

#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq) ((rq)->flags & 1)
static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}
/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) \
	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * noop, requests are automagically marked as active/inactive by I/O
 * scheduler -- see elv_next_request
 */
#define blk_queue_headactive(q, head_active)

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK 0		/* serve it */
#define BLKPREP_KILL 1		/* fatal error, kill */
#define BLKPREP_DEFER 2		/* leave on queue */
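/*
 * Illustrative sketch, not part of the original header: a prep_rq_fn as a
 * driver might install with blk_queue_prep_rq() (declared further down),
 * showing how the BLKPREP_* return values above are used.
 * example_prep_rq() and example_build_command() are hypothetical.
 */
#if 0
static int example_prep_rq(request_queue_t *q, struct request *rq)
{
	if (!blk_fs_request(rq) && !blk_pc_request(rq))
		return BLKPREP_KILL;	/* can't handle it: fail the request */

	if (!example_build_command(rq))
		return BLKPREP_DEFER;	/* resources busy: leave it queued */

	rq->flags |= REQ_DONTPREP;	/* prepped, don't prep it again */
	return BLKPREP_OK;
}
#endif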
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH : bounce all highmem pages
 * BLK_BOUNCE_ANY  : don't bounce anything
 * BLK_BOUNCE_ISA  : bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY ((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

#define rq_for_each_bio(_bio, rq) \
	if ((rq->bio)) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
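/*
 * Illustrative sketch, not part of the original header: walking all bios of a
 * request with rq_for_each_bio(), here just summing up the bio_vec entries.
 * example_count_bvecs() is a hypothetical helper.
 */
#if 0
static unsigned int example_count_bvecs(struct request *rq)
{
	unsigned int nr = 0;
	struct bio *bio;

	rq_for_each_bio(bio, rq)
		nr += bio->bi_vcnt;	/* bio_vec entries in this bio */

	return nr;
}
#endif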
struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
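/*
 * Illustrative sketch, not part of the original header: allocating a request,
 * filling it in as a block-layer packet command and running it synchronously
 * with blk_execute_rq(). Error handling is minimal; example_send_cdb() and
 * the caller-supplied cdb are hypothetical.
 */
#if 0
static int example_send_cdb(struct gendisk *disk, unsigned char *cdb,
			    unsigned int cdb_len)
{
	request_queue_t *q = disk->queue;
	struct request *rq;
	int err;

	if (cdb_len > BLK_MAX_CDB)
		return -EINVAL;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;		/* packet command from the block layer */
	rq->cmd_len = cdb_len;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, disk, rq);	/* waits for completion */
	blk_put_request(rq);
	return err;
}
#endif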
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *);
extern void end_request(struct request *req, int uptodate);

/*
 * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
 * any other < 0 value is the direct error type. An uptodate value of
 * 1 indicates successful io completion
 */
#define end_io_error(uptodate) (unlikely((uptodate) <= 0))
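/*
 * Illustrative sketch, not part of the original header: the completion path
 * most drivers build from end_that_request_first()/end_that_request_last(),
 * per the comment above. Must run with the queue lock held;
 * blkdev_dequeue_request() is defined just below. example_end_request() and
 * its nr_sectors argument are the driver's own bookkeeping.
 */
#if 0
static void example_end_request(struct request *rq, int uptodate, int nr_sectors)
{
	/* non-zero return means sectors are still outstanding on this rq */
	if (end_that_request_first(rq, uptodate, nr_sectors))
		return;

	blkdev_dequeue_request(rq);
	end_that_request_last(rq);
}
#endif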
static inline void blkdev_dequeue_request(struct request *req)
{
	BUG_ON(list_empty(&req->queuelist));

	list_del_init(&req->queuelist);

	if (req->rl)
		elv_remove_request(req->q, req);
}
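/*
 * Illustrative sketch, not part of the original header: a minimal request_fn
 * main loop. elv_next_request() (from <linux/elevator.h>, included above)
 * peeks at the next request; blkdev_dequeue_request() removes it from the
 * queue once the driver commits to it. example_request_fn() and
 * example_start_io() are hypothetical.
 */
#if 0
static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			end_request(rq, 0);	/* fail whatever we don't handle */
			continue;
		}
		blkdev_dequeue_request(rq);
		example_start_io(rq);		/* hand the request to the hardware */
	}
}
#endif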
/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					    spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern void blk_queue_ordered(request_queue_t *, int);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern struct request *blk_start_pre_flush(request_queue_t *, struct request *);
extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);

extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
extern void blk_wait_queue_drained(request_queue_t *, int);
extern void blk_finish_queue_drain(request_queue_t *);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(int gfp_mask);
request_queue_t *blk_alloc_queue_node(int, int);
#define blk_put_queue(q) blk_cleanup_queue((q))
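/*
 * Illustrative sketch, not part of the original header: typical queue setup
 * using the property functions above. The specific limits, example_lock,
 * example_request_fn() and example_prep_rq() are hypothetical.
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static int example_init_queue(struct gendisk *disk)
{
	request_queue_t *q = blk_init_queue(example_request_fn, &example_lock);

	if (!q)
		return -ENOMEM;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* bounce highmem pages */
	blk_queue_max_sectors(q, 128);			/* at most 64 KB per request */
	blk_queue_max_phys_segments(q, 32);
	blk_queue_max_hw_segments(q, 32);
	blk_queue_max_segment_size(q, 65536);
	blk_queue_hardsect_size(q, 512);
	blk_queue_prep_rq(q, example_prep_rq);

	disk->queue = q;
	return 0;
}
#endif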
/*
 * tag stuff
 */
#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
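/*
 * Illustrative sketch, not part of the original header: command tagging with
 * the functions above. On success blk_queue_start_tag() assigns rq->tag and
 * moves the request to the tag busy list; a non-zero return means no tag was
 * free and the request is left where it was. The depth of 32 and the
 * example_* helpers are hypothetical.
 */
#if 0
/* during setup, e.g. right after blk_init_queue(): blk_queue_init_tags(q, 32, NULL); */

static int example_issue_tagged(request_queue_t *q, struct request *rq)
{
	if (blk_queue_start_tag(q, rq))
		return 1;			/* out of tags, try again later */

	/* ... start the command, remembering rq->tag ... */
	return 0;
}

static void example_complete_tagged(request_queue_t *q, int tag)
{
	struct request *rq = blk_queue_find_tag(q, tag);

	if (rq)
		blk_queue_end_tag(q, rq);	/* frees the tag for reuse */
}
#endif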
extern long blk_congestion_wait(int rw, long timeout);

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_SEGMENT_SIZE 65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

static inline int bdev_dma_aligment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
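/*
 * Illustrative note, not part of the original header: blksize_bits() returns
 * log2 of the block size, e.g. blksize_bits(512) == 9, blksize_bits(1024) == 10
 * and blksize_bits(4096) == 12 (hence the "assumes size > 256" above).
 */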
extern inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif
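/*
 * Illustrative sketch, not part of the original header: sector_div() divides
 * its first argument in place and evaluates to the remainder, which keeps
 * 64-bit sector arithmetic correct when CONFIG_LBD is set. The stripe
 * conversion below is a hypothetical use.
 */
#if 0
static sector_t example_stripe_number(sector_t sector,
				      unsigned int stripe_sectors,
				      unsigned int *offset)
{
	*offset = sector_div(sector, stripe_sectors);	/* remainder */
	return sector;					/* quotient */
}
#endif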
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#endif