scsi_host.h

#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
struct blk_queue_tags;

/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.
 * Anything else: Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL 0xff

#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

enum scsi_eh_timer_return {
	EH_NOT_HANDLED,
	EH_HANDLED,
	EH_RESET_TIMER,
};


struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * Used to initialize old-style drivers.  For new-style drivers
	 * just perform all work in your module initialization function.
	 *
	 * Status: OBSOLETE
	 */
	int (* detect)(struct scsi_host_template *);

	/*
	 * Used as unload callback for hosts with old-style drivers.
	 *
	 * Status: OBSOLETE
	 */
	int (* release)(struct Scsi_Host *);

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
	/*
	 * Compat handler.  Handle 32bit ABI.
	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it.  (You may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct scsi_cmnd *,
			     void (*done)(struct scsi_cmnd *));
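
	/*
	 * Illustrative sketch (not part of this header): a minimal
	 * queuecommand for a hypothetical driver "foo".  The foo_host
	 * structure and foo_hw_queue() helper are invented for the
	 * example; only the calling and return conventions documented
	 * above come from this interface.
	 *
	 *	static int foo_queuecommand(struct scsi_cmnd *cmd,
	 *				    void (*done)(struct scsi_cmnd *))
	 *	{
	 *		struct foo_host *fh =
	 *			(struct foo_host *)cmd->device->host->hostdata;
	 *
	 *		if (fh->adapter_busy)
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		cmd->scsi_done = done;
	 *		if (foo_hw_queue(fh, cmd) != 0)
	 *			return SCSI_MLQUEUE_DEVICE_BUSY;
	 *		return 0;
	 *	}
	 *
	 * The saved done() callback is then invoked exactly once from the
	 * driver's completion path (typically its interrupt handler).
	 */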

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED (at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);
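
	/*
	 * Illustrative sketch: a trivial eh_host_reset_handler for the
	 * hypothetical "foo" driver.  foo_hw_reset() and foo_host are
	 * assumptions; SUCCESS and FAILED are the usual midlayer error
	 * handler return codes.
	 *
	 *	static int foo_eh_host_reset_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		struct Scsi_Host *shost = cmd->device->host;
	 *		struct foo_host *fh = (struct foo_host *)shost->hostdata;
	 *
	 *		if (foo_hw_reset(fh) != 0)
	 *			return FAILED;
	 *		return SUCCESS;
	 *	}
	 */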

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation: If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);
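
	/*
	 * Illustrative sketch: per-device data hung off scsi_device->hostdata
	 * by a hypothetical driver.  struct foo_device is invented for the
	 * example; the pairing with slave_destroy() follows the rules above.
	 *
	 *	static int foo_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		struct foo_device *fd;
	 *
	 *		fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	 *		if (!fd)
	 *			return -ENOMEM;
	 *		sdev->hostdata = fd;
	 *		return 0;
	 *	}
	 *
	 *	static void foo_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */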

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_adjust_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, e.g.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around; clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);
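
	/*
	 * Illustrative sketch: a slave_configure that sets the queue depth
	 * using the scsi_adjust_queue_depth() helper referenced above.
	 * FOO_QUEUE_DEPTH and the tagged-queueing check are example choices
	 * for a hypothetical driver, not requirements of this interface.
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		if (sdev->tagged_supported)
	 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
	 *						FOO_QUEUE_DEPTH);
	 *		else
	 *			scsi_adjust_queue_depth(sdev, 0, 1);
	 *		return 0;
	 *	}
	 */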

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 */
	int (* change_queue_depth)(struct scsi_device *, int);
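
	/*
	 * Illustrative sketch: a change_queue_depth implementation that
	 * clamps to a hypothetical adapter limit FOO_MAX_DEPTH and applies
	 * the result with scsi_adjust_queue_depth().  The scsi_get_tag_type()
	 * helper is assumed to be available from <scsi/scsi_tcq.h>.
	 *
	 *	static int foo_change_queue_depth(struct scsi_device *sdev,
	 *					  int depth)
	 *	{
	 *		if (depth > FOO_MAX_DEPTH)
	 *			depth = FOO_MAX_DEPTH;
	 *		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
	 *					depth);
	 *		return sdev->queue_depth;
	 *	}
	 */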

	/*
	 * Fill in this function to allow the changing of tag types
	 * (this also allows the enabling/disabling of tag command
	 * queueing).  An error should only be returned if something
	 * went wrong in the driver while trying to set the tag type.
	 * If the driver doesn't support the requested tag type, then
	 * it should set the closest type it does support without
	 * returning an error.  Returns the actual tag type set.
	 */
	int (* change_queue_type)(struct scsi_device *, int);
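
	/*
	 * Illustrative sketch: a change_queue_type implementation using the
	 * tagged command queueing helpers assumed to be available from
	 * <scsi/scsi_tcq.h> (scsi_set_tag_type, scsi_activate_tcq,
	 * scsi_deactivate_tcq).
	 *
	 *	static int foo_change_queue_type(struct scsi_device *sdev,
	 *					 int tag_type)
	 *	{
	 *		if (sdev->tagged_supported) {
	 *			scsi_set_tag_type(sdev, tag_type);
	 *			if (tag_type)
	 *				scsi_activate_tcq(sdev, sdev->queue_depth);
	 *			else
	 *				scsi_deactivate_tcq(sdev, sdev->queue_depth);
	 *		} else
	 *			tag_type = 0;
	 *		return tag_type;
	 *	}
	 */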

	/*
	 * This function determines the bios parameters for a given
	 * harddisk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			   sector_t, int []);
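
	/*
	 * Illustrative sketch: a bios_param that fabricates the common
	 * 64-head / 32-sector geometry and derives cylinders from the
	 * capacity passed in; geom[0], geom[1] and geom[2] are heads,
	 * sectors and cylinders respectively.  Real adapters often apply
	 * different translations for large disks.
	 *
	 *	static int foo_bios_param(struct scsi_device *sdev,
	 *				  struct block_device *bdev,
	 *				  sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 64;
	 *		geom[1] = 32;
	 *		geom[2] = (unsigned long)capacity / (64 * 32);
	 *		return 0;
	 *	}
	 */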

	/*
	 * Can be used to export driver statistics and other information to
	 * the world outside the kernel, i.e. userspace, and it also provides
	 * an interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

	/*
	 * Suspend support
	 */
	int (*resume)(struct scsi_device *);
	int (*suspend)(struct scsi_device *, pm_message_t state);

	/*
	 * Name of proc directory
	 */
	char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * proc_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;

	/*
	 * If the host adapter has limitations beside segment count.
	 */
	unsigned short max_sectors;

	/*
	 * DMA scatter gather segment boundary limit.  A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True if this host adapter can make good use of clustering.
	 * I originally thought that if the tablesize was large that it
	 * was a waste of CPU cycles to prepare a cluster list, but
	 * it works out that the Buslogic is faster if you use a smaller
	 * number of segments (i.e. use clustering).  I guess it is
	 * inefficient.
	 */
	unsigned use_clustering:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/*
	 * Ordered write support
	 */
	unsigned ordered_tag:1;

	/*
	 * Countdown for host blocking with no commands outstanding
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct class_device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head legacy_hosts;
};
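
/*
 * Illustrative sketch: how a low-level driver might fill in a template
 * using the fields above.  "foo" and its handlers are hypothetical; the
 * numeric values are only examples of typical choices.
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo HBA",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_host_reset_handler	= foo_eh_host_reset_handler,
 *		.slave_alloc		= foo_slave_alloc,
 *		.slave_configure	= foo_slave_configure,
 *		.slave_destroy		= foo_slave_destroy,
 *		.change_queue_depth	= foo_change_queue_depth,
 *		.bios_param		= foo_bios_param,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= 1024,
 *		.cmd_per_lun		= 4,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 */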

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held.  NEVER
	 * access this list directly from a driver.
	 */
	struct list_head __devices;
	struct list_head __targets;

	struct scsi_host_cmd_pool *cmd_pool;
	spinlock_t free_list_lock;
	struct list_head free_list;	/* backup store of cmd structs */
	struct list_head starved_list;

	spinlock_t default_lock;
	spinlock_t *host_lock;

	struct mutex scan_mutex;	/* serialize scanning activity */

	struct list_head eh_cmd_q;
	struct task_struct *ehandler;	/* Error recovery thread. */
	struct completion *eh_action;	/* Wait for specific actions on the host. */
	wait_queue_head_t host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * Area to keep a shared tag map (if needed, will be
	 * NULL if not).
	 */
	struct blk_queue_tag *bqt;

	/*
	 * The following two fields are protected with host_lock;
	 * however, eh routines can safely access during eh processing
	 * without acquiring the lock.
	 */
	unsigned int host_busy;		/* commands actually active on low-level */
	unsigned int host_failed;	/* commands that failed. */
	unsigned int host_eh_scheduled;	/* EH scheduled without command */

	unsigned short host_no;		/* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
	int resetting;			/* if set, it means that last_reset is a valid value */
	unsigned long last_reset;

	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The first two should be set to 1 more than the actual max id
	 * or lun (i.e. 8 for normal systems).
	 */
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.  I am leaving this as a number rather than a bit
	 * because you never know what subsequent SCSI standards might do
	 * (i.e. could there be a 20 byte or a 24-byte command a few years
	 * down the road?).
	 */
	unsigned char max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int max_sectors;
	unsigned long dma_boundary;

	/*
	 * Used to assign serial numbers to the cmds.
	 * Protected by the host lock.
	 */
	unsigned long cmd_serial_number, cmd_pid;

	unsigned unchecked_isa_dma:1;
	unsigned use_clustering:1;
	unsigned use_blk_tcq:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering.  The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;)
	 */
	unsigned reverse_ordering:1;

	/*
	 * Ordered write support
	 */
	unsigned ordered_tag:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[KOBJ_NAME_LEN];
	struct workqueue_struct *work_q;

	/*
	 * Host has rejected a command because it was busy.
	 */
	unsigned int host_blocked;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int irq;

	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device shost_gendev;
	struct class_device shost_classdev;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head sht_legacy_list;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately.
	 */
	void *shost_data;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]	/* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};
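
/*
 * Illustrative sketch: the hostdata[] flexible array above is where the
 * per-host private structure lives.  Its size is the second argument to
 * scsi_host_alloc() (declared below), and drivers of this era typically
 * reach it with a cast; struct foo_host is hypothetical.
 *
 *	struct foo_host *fh = (struct foo_host *)shost->hostdata;
 */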

#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_classdev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)


int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
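
/*
 * Illustrative sketch: the usual registration sequence built from the
 * declarations above, as it might appear in a hypothetical probe routine
 * (pdev, the error label, and the foo_template/foo_host names are
 * assumptions made up for the example).
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_host));
 *	if (!shost)
 *		return -ENOMEM;
 *	shost->max_id = 16;
 *	shost->max_lun = 8;
 *	error = scsi_add_host(shost, &pdev->dev);
 *	if (error)
 *		goto out_put;
 *	scsi_scan_host(shost);
 *	return 0;
 *
 *  out_put:
 *	scsi_host_put(shost);
 *	return error;
 *
 * Teardown is the mirror image: scsi_remove_host() followed by
 * scsi_host_put().
 */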

static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
{
	shost->host_lock = lock;
}

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
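
/*
 * Illustrative sketch: using the pseudo device for host-directed commands.
 * The command submission in the middle is elided; the point is only the
 * allocate/free pairing described above.
 *
 *	struct scsi_device *sdev = scsi_get_host_dev(shost);
 *	if (!sdev)
 *		return -ENOMEM;
 *	... send commands to the adapter itself via sdev ...
 *	scsi_free_host_dev(sdev);
 */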

/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */