
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _ISCI_REQUEST_H_
#define _ISCI_REQUEST_H_

#include "isci.h"
#include "host.h"
#include "scu_task_context.h"
/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.
 */
enum isci_request_status {
	unallocated = 0x00,
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};
enum task_type {
	io_task = 0,
	tmf_task = 1
};

enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
struct scic_sds_stp_request {
	union {
		u32 ncq;
		u32 udma;

		struct scic_sds_stp_pio_request {
			/**
			 * Total transfer for the entire PIO request, recorded at
			 * request construction time.
			 *
			 * @todo Should we just decrement this value for each byte of
			 * data transmitted or received to eliminate the
			 * current_transfer_bytes field?
			 */
			u32 total_transfer_bytes;

			/**
			 * Total number of bytes received/transmitted in data frames
			 * since the start of the IO request.  At the end of the IO
			 * request this should equal the total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/**
			 * The number of bytes requested in the PIO setup.
			 */
			u32 pio_transfer_bytes;

			/**
			 * PIO Setup ending status value to tell us if we need to wait
			 * for another FIS or if the transfer is complete.  On the
			 * receipt of a D2H FIS this will be the status field of that
			 * FIS.
			 */
			u8 ending_status;

			/**
			 * On receipt of a D2H FIS this will be the ending error field
			 * if the ending_status has the SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair;
				u8 sgl_set;
				u32 sgl_offset;
			} request_current;
		} pio;

		struct {
			/**
			 * The number of bytes requested in the PIO setup before the
			 * CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};
struct scic_sds_request {
	/**
	 * This field contains the information for the base request state machine.
	 */
	struct sci_base_state_machine state_machine;

	/**
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/**
	 * This field simply points to the remote device to which this IO request
	 * is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/**
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/**
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence count
	 * is utilized to help identify tasks from one life to another.
	 */
	u16 io_tag;

	/**
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/**
	 * This field indicates the completion status taken from the SCU's
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/**
	 * This field indicates the completion status returned to the SCI user.
	 * It indicates the user's view of the io request completion.
	 */
	u32 sci_status;

	/**
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/**
	 * This field indicates if this request is a task management request or
	 * a normal IO request.
	 */
	bool is_task_management_request;

	/**
	 * This field is the index of the saved rx frame data.  It is used in STP
	 * internal requests and SMP response frames.  If the saved frame index
	 * is valid, the saved frame must be released on IO request completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/**
	 * This field is the recorded device sequence for the io request.  This
	 * is recorded during the build operation and is compared in the start
	 * operation.  If the sequence is different then there was a change of
	 * devices from the build to the start operation.
	 */
	u8 device_sequence;

	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};
static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
{
	struct scic_sds_request *sci_req;

	sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
	return sci_req;
}
struct isci_request {
	enum isci_request_status status;
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task  */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;
};
static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);

	return ireq;
}
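/*
 * Illustrative note (not part of the original header): because struct
 * isci_request embeds 'struct scic_sds_request sci', which in turn embeds
 * 'stp.req', the container_of() helpers sci_req_to_ireq() and to_sci_req()
 * simply invert plain member access.  A minimal sketch:
 *
 *	struct isci_request *ireq = ...;
 *	struct scic_sds_request *sci_req = &ireq->sci;
 *	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
 *
 *	sci_req_to_ireq(sci_req) == ireq;
 *	to_sci_req(stp_req) == sci_req;
 */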
/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 */
enum sci_base_request_states {
	/**
	 * Simply the initial state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/**
	 * This state indicates that the request has been constructed.  This
	 * state is entered from the INITIAL state.
	 */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/**
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_BASE_REQUEST_STATE_STARTED,

	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,

	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,

	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for the TC
	 * completion notification for the H2D Register FIS.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for either a PIO
	 * Setup FIS or a D2H register FIS.  The type of frame received is based
	 * on the result of the prior frame and line conditions.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for a DATA frame
	 * from the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting to transmit the
	 * next data frame to the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/**
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,

	/**
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,

	/**
	 * This state indicates that the request has completed.
	 * This state is entered from the STARTED state.  This state is entered
	 * from the ABORTING state.
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/**
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.
	 * This state is entered from the CONSTRUCTED state.
	 * This state is entered from the STARTED state.
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/**
	 * Simply the final state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_FINAL,
};
/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object.
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object.
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object.
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the task context buffer for this request
 * object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)
/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	{ \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	}

/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data.
 */
#define SCU_SGL_ZERO(scu_sge) \
	{ \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	}
/**
 * SCU_SGL_COPY() -
 *
 * This macro copies the SGL element data from the host OS scatterlist entry
 * to the hardware SGL element data.
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	{ \
		(scu_sge).length = sg_dma_len(os_sge); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_modifier = 0; \
	}
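/*
 * Illustrative sketch (not part of the original header): a request-build path
 * could populate the hardware SGL pairs in sg_table[] from a Linux
 * scatterlist with these macros, zeroing the unused B element of a trailing
 * odd pair.  The 'pair' walk shown here is an assumption for illustration:
 *
 *	struct scatterlist *sg = task->scatter;
 *	struct scu_sgl_element_pair *pair = &sci_req->sg_table[0];
 *
 *	while (sg) {
 *		SCU_SGL_COPY(pair->A, sg);
 *		sg = sg_next(sg);
 *		if (sg) {
 *			SCU_SGL_COPY(pair->B, sg);
 *			sg = sg_next(sg);
 *		} else
 *			SCU_SGL_ZERO(pair->B);
 *		pair++;
 *	}
 */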
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code);
enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index);
enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
extern enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req);
extern enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
					       dma_addr_t phys_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	dma_addr_t offset;

	BUG_ON(phys_addr < ireq->request_daddr);

	offset = phys_addr - ireq->request_daddr;

	BUG_ON(offset >= sizeof(*ireq));

	return (char *)ireq + offset;
}

/* XXX open code in caller */
static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
						       void *virt_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	char *requested_addr = (char *)virt_addr;
	char *base_addr = (char *)ireq;

	BUG_ON(requested_addr < base_addr);
	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));

	return ireq->request_daddr + (requested_addr - base_addr);
}
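/*
 * Illustrative note (not part of the original header): these helpers assume
 * the whole isci_request, including the embedded scic_sds_request and its
 * command/response IUs, lives in one DMA-coherent allocation whose bus
 * address is request_daddr, so an embedded member can be translated by its
 * byte offset alone.  A minimal sketch:
 *
 *	dma_addr_t cmd_iu_dma =
 *		scic_io_request_get_dma_addr(sci_req, &sci_req->ssp.cmd);
 */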
/**
 * isci_request_get_state() - This function gets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 *
 * Returns the status of the object as an isci_request_status enum.
 */
static inline
enum isci_request_status isci_request_get_state(
	struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}
/**
 * isci_request_change_state() - This function sets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new status of the object
 *
 * Returns the state previous to the change.
 */
static inline enum isci_request_status isci_request_change_state(
	struct isci_request *isci_request,
	enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	BUG_ON(isci_request == NULL);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}
/**
 * isci_request_change_started_to_newstate() - This function sets the status
 *    of the request object.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the request completes.
 * @newstate: This parameter is the new status of the object
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_newstate(
	struct isci_request *isci_request,
	struct completion *completion_ptr,
	enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}
/**
 * isci_request_change_started_to_aborted() - This function sets the status of
 *    the request object to "aborted".
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_aborted(
	struct isci_request *isci_request,
	struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(
			isci_request, completion_ptr, aborted);
}
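/*
 * Illustrative usage sketch (not part of the original header): per the
 * io_request_completion note in struct isci_request, an abort path might pair
 * the state change with a wait on the supplied completion.  The surrounding
 * details are assumptions for illustration only:
 *
 *	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
 *	enum isci_request_status old_state;
 *
 *	old_state = isci_request_change_started_to_aborted(
 *			isci_request, &aborted_io_completion);
 *	if (old_state == started || old_state == aborting)
 *		wait_for_completion(&aborted_io_completion);
 */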
/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object
 *
 */
static inline void isci_request_free(
	struct isci_host *isci_host,
	struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* release the dma memory if we fail. */
	dma_pool_free(isci_host->dma_pool, isci_request,
		      isci_request->request_daddr);
}
/* #define ISCI_REQUEST_VALIDATE_ACCESS */

#ifdef ISCI_REQUEST_VALIDATE_ACCESS

static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else  /* not ISCI_REQUEST_VALIDATE_ACCESS */

#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);

int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);
/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *    sgl.
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_dev struct for the controller
 *
 */
static inline void isci_request_unmap_sgl(
	struct isci_request *request,
	struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir);
		else	/* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir);
	}
}
/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *    core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *    core for this request.
 *
 * Returns a pointer to the next sge for the specified request, or NULL once
 * the last element has been returned.
 */
static inline void *isci_request_io_request_get_next_sge(
	struct isci_request *request,
	void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through.. */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0)	/* Next element, if num_scatter == 0 */
		ret = NULL;			/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}
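/*
 * Illustrative iteration sketch (not part of the original header): a caller
 * walking every sge of a request could start from NULL and stop when the
 * helper above returns NULL:
 *
 *	void *sge = NULL;
 *
 *	while ((sge = isci_request_io_request_get_next_sge(ireq, sge)) != NULL)
 *		;	// process sge
 */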
void isci_terminate_pending_requests(struct isci_host *isci_host,
				     struct isci_remote_device *isci_device,
				     enum isci_request_status new_request_state);
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag,
					    struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);

#endif /* !defined(_ISCI_REQUEST_H_) */