/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ISCI_REQUEST_H_
#define _ISCI_REQUEST_H_

#include "isci.h"
#include "host.h"
#include "scu_task_context.h"

/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.
 */
enum isci_request_status {
	unallocated = 0x00,
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};

enum task_type {
	io_task = 0,
	tmf_task = 1
};

enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
}; /* XXX remove me, use sas_task.{dev|task_proto} instead */

struct scic_sds_stp_request {
	union {
		u32 ncq;

		u32 udma;

		struct scic_sds_stp_pio_request {
			/*
			 * Total transfer for the entire PIO request recorded
			 * at request construction time.
			 *
			 * @todo Should we just decrement this value for each
			 * byte of data transmitted or received to eliminate
			 * the current_transfer_bytes field?
			 */
			u32 total_transfer_bytes;

			/*
			 * Total number of bytes received/transmitted in data
			 * frames since the start of the IO request.  At the
			 * end of the IO request this should equal the
			 * total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/*
			 * The number of bytes requested in the PIO setup.
			 */
			u32 pio_transfer_bytes;

			/*
			 * PIO Setup ending status value to tell us if we need
			 * to wait for another FIS or if the transfer is
			 * complete.  On the receipt of a D2H FIS this will be
			 * the status field of that FIS.
			 */
			u8 ending_status;

			/*
			 * On receipt of a D2H FIS this will be the ending
			 * error field if the ending_status has the
			 * SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair;
				u8 sgl_set;
				u32 sgl_offset;
			} request_current;
		} pio;

		struct {
			/*
			 * The number of bytes requested in the PIO setup
			 * before CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};

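/*
 * Illustrative sketch only (the helper name is hypothetical and not part of
 * the original interface): a PIO state handler can derive the bytes still
 * owed for the request from the two byte counters tracked above.
 */
static inline u32
scic_sds_stp_pio_remaining_bytes(struct scic_sds_stp_request *stp_req)
{
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	/* current_transfer_bytes never exceeds total_transfer_bytes */
	return pio->total_transfer_bytes - pio->current_transfer_bytes;
}
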
struct scic_sds_request {
	/*
	 * This field contains the information for the base request state
	 * machine.
	 */
	struct sci_base_state_machine sm;

	/*
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/*
	 * This field simply points to the remote device to which this IO
	 * request is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/*
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/*
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence
	 * count is utilized to help identify tasks from one life to another.
	 */
	u16 io_tag;

	/*
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/*
	 * This field indicates the completion status taken from the SCU's
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/*
	 * This field indicates the completion status returned to the SCI
	 * user.  It indicates the user's view of the io request completion.
	 */
	u32 sci_status;

	/*
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/*
	 * This field indicates if this request is a task management request
	 * or a normal IO request.
	 */
	bool is_task_management_request;

	/*
	 * This field is the index of the stored rx frame data.  It is used in
	 * STP internal requests and SMP response frames.  If this field holds
	 * a valid frame index, the saved frame must be released on IO request
	 * completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/*
	 * This field is the recorded device sequence for the io request.
	 * This is recorded during the build operation and is compared in the
	 * start operation.  If the sequence is different then there was a
	 * change of devices from the build to start operations.
	 */
	u8 device_sequence;

	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};

			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};

static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
{
	struct scic_sds_request *sci_req;

	sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
	return sci_req;
}

struct isci_request {
	enum isci_request_status status;
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task  */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;

	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by dma_map_sg */

	/*
	 * Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;
};

static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);

	return ireq;
}

/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 */
enum sci_base_request_states {
	/*
	 * Simply the initial state for the base request state machine.
	 */
	SCI_REQ_INIT,

	/*
	 * This state indicates that the request has been constructed.
	 * This state is entered from the INITIAL state.
	 */
	SCI_REQ_CONSTRUCTED,

	/*
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_REQ_STARTED,

	SCI_REQ_STP_UDMA_WAIT_TC_COMP,
	SCI_REQ_STP_UDMA_WAIT_D2H,

	SCI_REQ_STP_NON_DATA_WAIT_H2D,
	SCI_REQ_STP_NON_DATA_WAIT_D2H,

	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
	SCI_REQ_STP_SOFT_RESET_WAIT_D2H,

	/*
	 * While in this state the IO request object is waiting for the TC
	 * completion notification for the H2D Register FIS.
	 */
	SCI_REQ_STP_PIO_WAIT_H2D,

	/*
	 * While in this state the IO request object is waiting for either a
	 * PIO Setup FIS or a D2H register FIS.  The type of frame received is
	 * based on the result of the prior frame and line conditions.
	 */
	SCI_REQ_STP_PIO_WAIT_FRAME,

	/*
	 * While in this state the IO request object is waiting for a DATA
	 * frame from the device.
	 */
	SCI_REQ_STP_PIO_DATA_IN,

	/*
	 * While in this state the IO request object is waiting to transmit
	 * the next data frame to the device.
	 */
	SCI_REQ_STP_PIO_DATA_OUT,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCI_REQ_TASK_WAIT_TC_COMP,

	/*
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCI_REQ_TASK_WAIT_TC_RESP,

	/*
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCI_REQ_SMP_WAIT_RESP,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCI_REQ_SMP_WAIT_TC_COMP,

	/*
	 * This state indicates that the request has completed.
	 * This state is entered from the STARTED state.  This state is
	 * entered from the ABORTING state.
	 */
	SCI_REQ_COMPLETED,

	/*
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.
	 * This state is entered from the CONSTRUCTED state.
	 * This state is entered from the STARTED state.
	 */
	SCI_REQ_ABORTING,

	/*
	 * Simply the final state for the base request state machine.
	 */
	SCI_REQ_FINAL,
};

/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the task context buffer for this request
 * object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)

/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	{ \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	}

/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data
 */
#define SCU_SGL_ZERO(scu_sge) \
	{ \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	}

/**
 * SCU_SGL_COPY() -
 *
 * This macro copies the SGL element data from the host OS scatterlist entry
 * to the hardware SGL element data
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	{ \
		(scu_sge).length = sg_dma_len(os_sge); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_modifier = 0; \
	}

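/*
 * Illustrative sketch only (the helper name is hypothetical and not part of
 * this header's interface): how request construction might use SCU_SGL_COPY()
 * and SCU_SGL_ZERO() to populate one hardware SGL element pair from a DMA
 * mapped scatterlist.  The scatterlist helpers are assumed to be visible
 * through the isci includes; sg_next() is already used later in this header.
 */
static inline void scu_sgl_pair_fill_sketch(struct scu_sgl_element_pair *pair,
					    struct scatterlist *sg)
{
	/* the A element always takes the current scatterlist entry */
	SCU_SGL_COPY(pair->A, sg);

	/* the B element takes the next entry, or is zeroed at the list end */
	sg = sg_next(sg);
	if (sg) {
		SCU_SGL_COPY(pair->B, sg);
	} else {
		SCU_SGL_ZERO(pair->B);
	}
}
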
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status
scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
				  u32 event_code);
enum sci_status
scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
				  u32 frame_index);
enum sci_status
scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
extern enum sci_status
scic_sds_request_complete(struct scic_sds_request *sci_req);
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);

/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
					       dma_addr_t phys_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	dma_addr_t offset;

	BUG_ON(phys_addr < ireq->request_daddr);

	offset = phys_addr - ireq->request_daddr;

	BUG_ON(offset >= sizeof(*ireq));

	return (char *)ireq + offset;
}

/* XXX open code in caller */
static inline dma_addr_t
scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	char *requested_addr = (char *)virt_addr;
	char *base_addr = (char *)ireq;

	BUG_ON(requested_addr < base_addr);
	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));

	return ireq->request_daddr + (requested_addr - base_addr);
}

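/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * interface): a typical use of the translator above is deriving the bus
 * address of a buffer embedded in the request, e.g. the SSP response IU,
 * when programming the hardware task context.
 */
static inline dma_addr_t
scic_ssp_rsp_iu_dma_addr_sketch(struct scic_sds_request *sci_req)
{
	return scic_io_request_get_dma_addr(sci_req, &sci_req->ssp.rsp);
}
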
/**
 * isci_request_get_state() - This function gets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 *
 * Returns the status of the object as an isci_request_status enum.
 */
static inline enum isci_request_status
isci_request_get_state(struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}

/**
 * isci_request_change_state() - This function sets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new status of the object
 *
 * Returns the state previous to the change.
 */
static inline enum isci_request_status
isci_request_change_state(struct isci_request *isci_request,
			  enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	BUG_ON(isci_request == NULL);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}

/**
 * isci_request_change_started_to_newstate() - This function sets the status
 *    of the request object, but only if the request is currently started or
 *    aborting.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    to be signalled when the request completes.
 * @newstate: This parameter is the new status of the object
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_started_to_newstate(struct isci_request *isci_request,
					struct completion *completion_ptr,
					enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);
		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}

	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}

/**
 * isci_request_change_started_to_aborted() - This function sets the status of
 *    the request object to aborted, but only if the request is currently
 *    started or aborting.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_started_to_aborted(struct isci_request *isci_request,
				       struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(isci_request,
						       completion_ptr,
						       aborted);
}

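/*
 * Illustrative sketch only (hypothetical function, not part of this header's
 * interface): the intended use of the helper above in an abort path.  The
 * caller supplies a completion, moves a started request to the aborted state,
 * and then waits for the request to signal that completion (see the
 * io_request_completion note in struct isci_request).  This assumes the
 * kernel completion API (<linux/completion.h>) is reachable through the isci
 * includes.
 */
static inline void
isci_request_wait_for_abort_sketch(struct isci_request *isci_request)
{
	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
	enum isci_request_status old_state;

	old_state = isci_request_change_started_to_aborted(isci_request,
							   &aborted_io_completion);

	/* only a request that was actually in flight will signal us */
	if (old_state == started || old_state == aborting)
		wait_for_completion(&aborted_io_completion);
}
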
/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object
 *
 */
static inline void isci_request_free(struct isci_host *isci_host,
				     struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* release the dma memory if we fail. */
	dma_pool_free(isci_host->dma_pool,
		      isci_request,
		      isci_request->request_daddr);
}

#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)

int isci_request_alloc_tmf(struct isci_host *isci_host,
			   struct isci_tmf *isci_tmf,
			   struct isci_request **isci_request,
			   struct isci_remote_device *isci_device,
			   gfp_t gfp_flags);

int isci_request_execute(struct isci_host *isci_host,
			 struct sas_task *task,
			 struct isci_request **request,
			 gfp_t gfp_flags);

/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *    sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_dev struct for the controller
 *
 */
static inline void
isci_request_unmap_sgl(struct isci_request *request, struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(&pdev->dev,
					 request->zero_scatter_daddr,
					 task->total_xfer_len,
					 task->data_dir);
		else
			/* unmap the sgl dma addresses */
			dma_unmap_sg(&pdev->dev,
				     task->scatter,
				     request->num_sg_entries,
				     task->data_dir);
	}
}

/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *    core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *    core for this request.
 *
 * Returns a pointer to the next sge for the specified request.
 */
static inline void *
isci_request_io_request_get_next_sge(struct isci_request *request,
				     void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)		/* First time through.. */
		ret = task->scatter;		/* always task->scatter */
	else if (task->num_scatter == 0)	/* Next element, if num_scatter == 0 */
		ret = NULL;			/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}

void
isci_terminate_pending_requests(struct isci_host *isci_host,
				struct isci_remote_device *isci_device,
				enum isci_request_status new_request_state);
enum sci_status
scic_task_request_construct(struct scic_sds_controller *scic,
			    struct scic_sds_remote_device *sci_dev,
			    u16 io_tag,
			    struct scic_sds_request *sci_req);
enum sci_status
scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status
scic_task_request_construct_sata(struct scic_sds_request *sci_req);
void
scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);

#endif /* !defined(_ISCI_REQUEST_H_) */