host.h

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_

#include "remote_device.h"
#include "phy.h"
#include "pool.h"
#include "isci.h"
#include "remote_node_table.h"
#include "registers.h"
#include "scu_unsolicited_frame.h"
#include "unsolicited_frame_control.h"
#include "probe_roms.h"

struct scic_sds_request;
struct scu_task_context;
/**
 * struct scic_power_control -
 *
 * This structure defines the fields for managing power control for direct
 * attached disk devices.
 */
struct scic_power_control {
	/**
	 * This field is set when the power control timer is running and cleared when
	 * it is not.
	 */
	bool timer_started;

	/**
	 * Timer to control when the direct attached disks can consume power.
	 */
	struct sci_timer timer;

	/**
	 * This field is used to keep track of how many phys are put into the
	 * requesters field.
	 */
	u8 phys_waiting;

	/**
	 * This field is used to keep track of how many phys have been granted to
	 * consume power.
	 */
	u8 phys_granted_power;

	/**
	 * This field is an array of phys that we are waiting on. The phys are direct
	 * mapped into requesters via struct scic_sds_phy.phy_index.
	 */
	struct scic_sds_phy *requesters[SCI_MAX_PHYS];
};
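
/*
 * Illustrative sketch (not part of the original header): because requesters[]
 * is direct-mapped by struct scic_sds_phy.phy_index, queueing a phy that is
 * waiting for power is a constant-time slot assignment. The helper name below
 * is hypothetical.
 */
#if 0	/* example only */
static void example_power_control_queue_phy(struct scic_power_control *pc,
					    struct scic_sds_phy *sci_phy)
{
	if (!pc->requesters[sci_phy->phy_index]) {
		pc->requesters[sci_phy->phy_index] = sci_phy;
		pc->phys_waiting++;
	}
}
#endif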
struct scic_sds_port_configuration_agent;
typedef void (*port_config_fn)(struct scic_sds_controller *,
			       struct scic_sds_port_configuration_agent *,
			       struct scic_sds_port *, struct scic_sds_phy *);

struct scic_sds_port_configuration_agent {
	u16 phy_configured_mask;
	u16 phy_ready_mask;
	struct {
		u8 min_index;
		u8 max_index;
	} phy_valid_port_range[SCI_MAX_PHYS];
	bool timer_pending;
	port_config_fn link_up_handler;
	port_config_fn link_down_handler;
	struct sci_timer timer;
};
/**
 * struct scic_sds_controller -
 *
 * This structure represents the SCU controller object.
 */
struct scic_sds_controller {
	/**
	 * This field contains the information for the base controller state
	 * machine.
	 */
	struct sci_base_state_machine sm;

	/**
	 * Timer for controller start/stop operations.
	 */
	struct sci_timer timer;

	/**
	 * This field contains the user parameters to be utilized for this
	 * core controller object.
	 */
	union scic_user_parameters user_parameters;

	/**
	 * This field contains the OEM parameters to be utilized for this
	 * core controller object.
	 */
	union scic_oem_parameters oem_parameters;

	/**
	 * This field contains the port configuration agent for this controller.
	 */
	struct scic_sds_port_configuration_agent port_agent;

	/**
	 * This field is the array of device objects that are currently constructed
	 * for this controller object. This table is used as a fast lookup of device
	 * objects that need to handle device completion notifications from the
	 * hardware. The table is RNi based.
	 */
	struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];

	/**
	 * This field is the array of IO request objects that are currently active for
	 * this controller object. This table is used as a fast lookup of the IO
	 * request objects that need to handle completion queue notifications. The
	 * table is TCi based.
	 */
	struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS];

	/**
	 * This field is the free RNi data structure.
	 */
	struct scic_remote_node_table available_remote_nodes;

	/**
	 * This field is the TCi pool used to manage the task context index.
	 */
	SCI_POOL_CREATE(tci_pool, u16, SCI_MAX_IO_REQUESTS);

	/**
	 * This field is the struct scic_power_control data used to control when
	 * direct attached devices can consume power.
	 */
	struct scic_power_control power_control;
	/**
	 * This field is the array of sequence values for the IO Tag fields. Even
	 * though only 4 bits of the field are used for the sequence, the sequence is
	 * 16 bits in size so the sequence can be bitwise or'd with the TCi to build
	 * the IO Tag value.
	 */
	u16 io_request_sequence[SCI_MAX_IO_REQUESTS];

	/**
	 * This field is the array of sequence values for the RNi. These are used
	 * to control io request build to io request start operations. The sequence
	 * value is recorded into an io request when it is built and is checked on
	 * the io request start operation to make sure that there was not a device
	 * hot plug between the build and start operation.
	 */
	u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES];

	/**
	 * This field is a pointer to the memory allocated by the driver for the task
	 * context table. This data is shared between the hardware and software.
	 */
	struct scu_task_context *task_context_table;

	/**
	 * This field is a pointer to the memory allocated by the driver for the
	 * remote node context table. This table is shared between the hardware and
	 * software.
	 */
	union scu_remote_node_context *remote_node_context_table;

	/**
	 * This field is a pointer to the completion queue. This memory is
	 * written to by the hardware and read by the software.
	 */
	u32 *completion_queue;

	/**
	 * This field is the software copy of the completion queue get pointer. The
	 * controller object writes this value to the hardware after processing the
	 * completion entries.
	 */
	u32 completion_queue_get;

	/**
	 * This field is the minimum of the number of hardware supported port entries
	 * and the software requested port entries.
	 */
	u32 logical_port_entries;

	/**
	 * This field is the minimum of the number of hardware supported completion
	 * queue entries and the software requested completion queue entries.
	 */
	u32 completion_queue_entries;

	/**
	 * This field is the minimum of the number of hardware supported event
	 * entries and the software requested event entries.
	 */
	u32 completion_event_entries;

	/**
	 * This field is the minimum of the number of devices supported by the
	 * hardware and the number of devices requested by the software.
	 */
	u32 remote_node_entries;

	/**
	 * This field is the minimum of the number of IO requests supported by the
	 * hardware and the number of IO requests requested by the software.
	 */
	u32 task_context_entries;

	/**
	 * This object contains all of the unsolicited frame specific
	 * data utilized by the core controller.
	 */
	struct scic_sds_unsolicited_frame_control uf_control;
	/* Phy Startup Data */
	/**
	 * Timer for controller phy request startup. On controller start the
	 * controller will start each PHY individually in order of phy index.
	 */
	struct sci_timer phy_timer;

	/**
	 * This field is set when the phy_timer is running and is cleared when
	 * the phy_timer is stopped.
	 */
	bool phy_startup_timer_pending;

	/**
	 * This field is the index of the next phy start. It is initialized to 0 and
	 * increments for each phy index that is started.
	 */
	u32 next_phy_to_start;

	/**
	 * This field controls the invalid link up notifications to the SCI_USER. If
	 * an invalid_link_up notification is reported, a bit for the PHY index is set
	 * so further notifications are not made. Once the PHY object reports link up
	 * and is made part of a port, this bit for the PHY index is cleared.
	 */
	u8 invalid_phy_mask;

	/*
	 * This field saves the current interrupt coalescing number of the controller.
	 */
	u16 interrupt_coalesce_number;

	/*
	 * This field saves the current interrupt coalescing timeout value in
	 * microseconds.
	 */
	u32 interrupt_coalesce_timeout;

	/**
	 * This field is a pointer to the memory mapped register space for the
	 * struct smu_registers.
	 */
	struct smu_registers __iomem *smu_registers;

	/**
	 * This field is a pointer to the memory mapped register space for the
	 * struct scu_registers.
	 */
	struct scu_registers __iomem *scu_registers;
};
struct isci_host {
	struct scic_sds_controller sci;
	union scic_oem_parameters oem_parameters;

	int id; /* unique within a given pci device */
	void *core_ctrl_memory;
	struct dma_pool *dma_pool;
	struct isci_phy phys[SCI_MAX_PHYS];
	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
	struct sas_ha_struct sas_ha;

	int can_queue;
	spinlock_t queue_lock;
	spinlock_t state_lock;

	struct pci_dev *pdev;

	enum isci_status status;
	#define IHOST_START_PENDING 0
	#define IHOST_STOP_PENDING 1
	unsigned long flags;
	wait_queue_head_t eventq;
	struct Scsi_Host *shost;
	struct tasklet_struct completion_tasklet;
	struct list_head requests_to_complete;
	struct list_head requests_to_errorback;
	spinlock_t scic_lock;

	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
};
/**
 * enum scic_sds_controller_states - This enumeration depicts all the states
 * for the common controller state machine.
 */
enum scic_sds_controller_states {
	/**
	 * Simply the initial state for the base controller state machine.
	 */
	SCIC_INITIAL = 0,

	/**
	 * This state indicates that the controller is reset. The memory for
	 * the controller is in its initial state, but the controller requires
	 * initialization.
	 * This state is entered from the INITIAL state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_RESET,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of initialization. In this state no new IO operations
	 * are permitted.
	 * This state is entered from the RESET state.
	 */
	SCIC_INITIALIZING,

	/**
	 * This state indicates that the controller has been successfully
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 */
	SCIC_INITIALIZED,

	/**
	 * This state indicates that the controller is in the process of becoming
	 * ready (i.e. starting). In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZED state.
	 */
	SCIC_STARTING,

	/**
	 * This state indicates the controller is now ready. Thus, the user
	 * is able to perform IO operations on the controller.
	 * This state is entered from the STARTING state.
	 */
	SCIC_READY,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of resetting. Thus, the user is unable to perform
	 * IO operations on the controller. A reset is considered destructive in
	 * most cases.
	 * This state is entered from the READY state.
	 * This state is entered from the FAILED state.
	 * This state is entered from the STOPPED state.
	 */
	SCIC_RESETTING,

	/**
	 * This state indicates that the controller is in the process of stopping.
	 * In this state no new IO operations are permitted, but existing IO
	 * operations are allowed to complete.
	 * This state is entered from the READY state.
	 */
	SCIC_STOPPING,

	/**
	 * This state indicates that the controller has successfully been stopped.
	 * In this state no new IO operations are permitted.
	 * This state is entered from the STOPPING state.
	 */
	SCIC_STOPPED,

	/**
	 * This state indicates that the controller could not successfully be
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 * This state is entered from the STARTING state.
	 * This state is entered from the STOPPING state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_FAILED,
};
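
/*
 * Illustrative sketch (not part of the original header): callers typically
 * refuse new I/O unless the controller has reached SCIC_READY. This assumes
 * the base state machine exposes its current state as sm.current_state_id;
 * the helper name is hypothetical.
 */
#if 0	/* example only */
static bool example_controller_accepts_io(struct scic_sds_controller *scic)
{
	return scic->sm.current_state_id == SCIC_READY;
}
#endif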
/**
 * struct isci_pci_info - This class represents the pci function containing the
 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
 * the PCI function.
 */
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT * SCI_MAX_CONTROLLERS)

struct isci_pci_info {
	struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
	struct isci_host *hosts[SCI_MAX_CONTROLLERS];
	struct isci_orom *orom;
};
static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

#define for_each_isci_host(id, ihost, pdev) \
	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
	     ihost = to_pci_info(pdev)->hosts[++id])
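
/*
 * Illustrative usage sketch (not part of the original header): walking the
 * one or two controllers behind a single PCI function. The iteration stops at
 * the first NULL entry in isci_pci_info.hosts[]. The function name is
 * hypothetical.
 */
#if 0	/* example only */
static void example_log_hosts(struct pci_dev *pdev)
{
	struct isci_host *ihost;
	int i;

	for_each_isci_host(i, ihost, pdev)
		dev_dbg(&pdev->dev, "controller %d present\n", ihost->id);
}
#endif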
static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
{
	return isci_host->status;
}

static inline void isci_host_change_state(struct isci_host *isci_host,
					  enum isci_status status)
{
	unsigned long flags;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host = %p, state = 0x%x",
		__func__,
		isci_host,
		status);
	spin_lock_irqsave(&isci_host->state_lock, flags);
	isci_host->status = status;
	spin_unlock_irqrestore(&isci_host->state_lock, flags);
}
static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&isci_host->queue_lock, flags);
	if ((isci_host->can_queue - num) < 0) {
		dev_dbg(&isci_host->pdev->dev,
			"%s: isci_host->can_queue = %d\n",
			__func__,
			isci_host->can_queue);
		ret = -SAS_QUEUE_FULL;
	} else
		isci_host->can_queue -= num;
	spin_unlock_irqrestore(&isci_host->queue_lock, flags);

	return ret;
}

static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
{
	unsigned long flags;

	spin_lock_irqsave(&isci_host->queue_lock, flags);
	isci_host->can_queue += num;
	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
}
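
/*
 * Illustrative usage sketch (not part of the original header): reserve a
 * queue slot before submitting an I/O and give it back if submission fails.
 * example_start_io() is a hypothetical submission helper.
 */
#if 0	/* example only */
static int example_queue_one_io(struct isci_host *ihost, struct sas_task *task)
{
	int err = isci_host_can_queue(ihost, 1);	/* -SAS_QUEUE_FULL when exhausted */

	if (err)
		return err;

	err = example_start_io(ihost, task);		/* hypothetical submission path */
	if (err)
		isci_host_can_dequeue(ihost, 1);	/* return the reserved slot */

	return err;
}
#endif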
static inline void wait_for_start(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
}

static inline void wait_for_stop(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
}

static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
}

static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
}
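
/*
 * Illustrative sketch (not part of the original header): the *_PENDING flag
 * is set before the asynchronous operation is kicked off, and the completion
 * path clears it and wakes eventq. example_begin_async_start() is a
 * hypothetical stand-in for whatever initiates the operation.
 */
#if 0	/* example only */
static void example_start_and_wait(struct isci_host *ihost)
{
	set_bit(IHOST_START_PENDING, &ihost->flags);
	example_begin_async_start(ihost);	/* hypothetical async kick-off */
	wait_for_start(ihost);			/* sleeps until the flag clears */
}
#endif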
static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
{
	return dev->port->ha->lldd_ha;
}

static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
{
	/* XXX delete after merging scic_sds_controller and isci_host */
	struct isci_host *ihost = container_of(scic, typeof(*ihost), sci);

	return ihost;
}
/**
 * INCREMENT_QUEUE_GET() -
 *
 * This macro will increment the specified index and, if the index wraps to 0,
 * it will toggle the cycle bit.
 */
#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
	{ \
		if ((index) + 1 == entry_count) { \
			(index) = 0; \
			(cycle) = (cycle) ^ (bit_toggle); \
		} else { \
			index = index + 1; \
		} \
	}
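
/*
 * Illustrative sketch (not part of the original header): one common way a get
 * pointer with a cycle bit is consumed. An entry counts as new while its
 * cycle bit matches the expected cycle; when the index wraps, the macro flips
 * the expected cycle so stale entries from the previous pass are skipped.
 * The EXAMPLE_* constants and the helper name are hypothetical.
 */
#if 0	/* example only */
#define EXAMPLE_QUEUE_ENTRIES	16		/* hypothetical queue depth    */
#define EXAMPLE_CYCLE_BIT	0x80000000	/* hypothetical cycle-bit mask */

static void example_drain_queue(u32 *queue, u32 *get, u32 *cycle)
{
	while ((queue[*get] & EXAMPLE_CYCLE_BIT) == (*cycle & EXAMPLE_CYCLE_BIT)) {
		/* ... consume queue[*get] here ... */
		INCREMENT_QUEUE_GET(*get, *cycle,
				    EXAMPLE_QUEUE_ENTRIES, EXAMPLE_CYCLE_BIT);
	}
}
#endif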
/**
 * scic_sds_controller_get_protocol_engine_group() -
 *
 * This macro returns the protocol engine group for this controller object.
 * Presently we only support protocol engine group 0, so just return that.
 */
#define scic_sds_controller_get_protocol_engine_group(controller) 0
/**
 * scic_sds_io_tag_construct() -
 *
 * This macro constructs an IO tag from the sequence and index values.
 */
#define scic_sds_io_tag_construct(sequence, task_index) \
	((sequence) << 12 | (task_index))

/**
 * scic_sds_io_tag_get_sequence() -
 *
 * This macro returns the IO sequence from the IO tag value.
 */
#define scic_sds_io_tag_get_sequence(io_tag) \
	(((io_tag) & 0xF000) >> 12)

/**
 * scic_sds_io_tag_get_index() -
 *
 * This macro returns the TCi from the IO tag value.
 */
#define scic_sds_io_tag_get_index(io_tag) \
	((io_tag) & 0x0FFF)

/**
 * scic_sds_io_sequence_increment() -
 *
 * This is a helper macro to increment the IO sequence count. We may find in
 * the future that it is faster to store the sequence count in a form that
 * avoids the shift operation when building IO tag values, so we need a single
 * place to increment the sequence correctly.
 */
#define scic_sds_io_sequence_increment(value) \
	((value) = (((value) + 1) & 0x000F))
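
/*
 * Illustrative sketch (not part of the original header): the tag packs the
 * 4-bit sequence into bits 15:12 and the 12-bit TCi into bits 11:0, so
 * sequence 0x3 with TCi 0x042 yields tag 0x3042. The function name is
 * hypothetical.
 */
#if 0	/* example only */
static void example_io_tag_round_trip(void)
{
	u16 seq = 0x3;
	u16 tci = 0x042;
	u16 tag = scic_sds_io_tag_construct(seq, tci);	/* 0x3042 */

	BUG_ON(scic_sds_io_tag_get_sequence(tag) != seq);
	BUG_ON(scic_sds_io_tag_get_index(tag) != tci);

	scic_sds_io_sequence_increment(seq);		/* 0x3 -> 0x4, wraps after 0xF */
}
#endif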
/* expander attached sata devices require 3 rnc slots */
static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);

	if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
	    !sci_dev->is_direct_attached)
		return SCU_STP_REMOTE_NODE_COUNT;
	return SCU_SSP_REMOTE_NODE_COUNT;
}
/**
 * scic_sds_controller_set_invalid_phy() -
 *
 * This macro will set the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define scic_sds_controller_set_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))

/**
 * scic_sds_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define scic_sds_controller_clear_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
static inline struct device *scic_to_dev(struct scic_sds_controller *scic)
{
	return &scic_to_ihost(scic)->pdev->dev;
}

static inline struct device *sciphy_to_dev(struct scic_sds_phy *sci_phy)
{
	struct isci_phy *iphy = sci_phy_to_iphy(sci_phy);

	if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
		return NULL;

	return &iphy->isci_port->isci_host->pdev->dev;
}

static inline struct device *sciport_to_dev(struct scic_sds_port *sci_port)
{
	struct isci_port *iport = sci_port_to_iport(sci_port);

	if (!iport || !iport->isci_host)
		return NULL;

	return &iport->isci_host->pdev->dev;
}

static inline struct device *scirdev_to_dev(struct scic_sds_remote_device *sci_dev)
{
	struct isci_remote_device *idev =
			container_of(sci_dev, typeof(*idev), sci);

	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
		return NULL;

	return &idev->isci_port->isci_host->pdev->dev;
}
enum {
	ISCI_SI_REVA0,
	ISCI_SI_REVA2,
	ISCI_SI_REVB0,
	ISCI_SI_REVC0
};

extern int isci_si_rev;

static inline bool is_a0(void)
{
	return isci_si_rev == ISCI_SI_REVA0;
}

static inline bool is_a2(void)
{
	return isci_si_rev == ISCI_SI_REVA2;
}

static inline bool is_b0(void)
{
	return isci_si_rev == ISCI_SI_REVB0;
}

static inline bool is_c0(void)
{
	return isci_si_rev > ISCI_SI_REVB0;
}
void scic_sds_controller_post_request(struct scic_sds_controller *scic,
				      u32 request);

void scic_sds_controller_release_frame(struct scic_sds_controller *scic,
				       u32 frame_index);

void scic_sds_controller_copy_sata_response(void *response_buffer,
					    void *frame_header,
					    void *frame_buffer);

enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic,
								  struct scic_sds_remote_device *sci_dev,
								  u16 *node_id);

void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id);

union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id);

struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag);

struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag);

void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_up(
	struct scic_sds_controller *scic,
	struct scic_sds_port *sci_port,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_down(
	struct scic_sds_controller *scic,
	struct scic_sds_port *sci_port,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_remote_device_stopped(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev);

void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *this_request);

void scic_sds_controller_register_setup(struct scic_sds_controller *scic);

enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);

int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);

int isci_host_init(struct isci_host *);

void isci_host_init_controller_names(
	struct isci_host *isci_host,
	unsigned int controller_idx);

void isci_host_deinit(
	struct isci_host *);

void isci_host_port_link_up(
	struct isci_host *,
	struct scic_sds_port *,
	struct scic_sds_phy *);

int isci_host_dev_found(struct domain_device *);

void isci_host_remote_device_start_complete(
	struct isci_host *,
	struct isci_remote_device *,
	enum sci_status);

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic);

enum sci_status scic_controller_start_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *io_request,
	u16 io_tag);

enum sci_task_status scic_controller_start_task(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *task_request,
	u16 io_tag);

enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *request);

enum sci_status scic_controller_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *io_request);

u16 scic_controller_allocate_io_tag(
	struct scic_sds_controller *scic);

enum sci_status scic_controller_free_io_tag(
	struct scic_sds_controller *scic,
	u16 io_tag);

void scic_sds_port_configuration_agent_construct(
	struct scic_sds_port_configuration_agent *port_agent);

enum sci_status scic_sds_port_configuration_agent_initialize(
	struct scic_sds_controller *controller,
	struct scic_sds_port_configuration_agent *port_agent);

#endif