request.h 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #ifndef _ISCI_REQUEST_H_
  56. #define _ISCI_REQUEST_H_
  57. #include "isci.h"
  58. #include "host.h"
  59. #include "scu_task_context.h"
  60. #include "stp_request.h"
/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.  (The original kernel-doc said "struct"; this is an enum.)
 */
enum isci_request_status {
	unallocated = 0x00,	/* request object is not in use */
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};
/**
 * enum task_type - Discriminates the two kinds of request tracked by
 *    struct isci_request; selects the live member of its ttype_ptr union.
 */
enum task_type {
	io_task = 0,	/* normal I/O: ttype_ptr.io_task_ptr is valid */
	tmf_task = 1	/* task management: ttype_ptr.tmf_task_ptr is valid */
};
/**
 * enum sci_request_protocol - Transport protocol carried by a core request;
 *    stored in scic_sds_request.protocol.
 */
enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
}; /* XXX remove me, use sas_task.dev instead */;
struct scic_sds_request {
	/**
	 * This field contains the information for the base request state
	 * machine.
	 */
	struct sci_base_state_machine state_machine;

	/**
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/**
	 * This field simply points to the remote device to which this IO
	 * request is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/**
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/**
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence
	 * count is utilized to help identify tasks from one life to another.
	 */
	u16 io_tag;

	/**
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/**
	 * This field indicates the completion status taken from the SCUs
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/**
	 * This field indicates the completion status returned to the SCI
	 * user.  It indicates the users view of the io request completion.
	 */
	u32 sci_status;

	/**
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	/* Pointer to the hardware task context in use for this request
	 * (see scic_sds_request_get_task_context()).
	 */
	struct scu_task_context *task_context_buffer;

	/* Embedded task context, cacheline aligned for hardware access. */
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/**
	 * This field indicates if this request is a task management request
	 * or normal IO request.
	 */
	bool is_task_management_request;

	/**
	 * This field indicates that this request contains an initialized
	 * started substate machine.
	 */
	bool has_started_substate_machine;

	/**
	 * Index of the stored rx frame data, used in STP internal requests
	 * and SMP response frames.  If a frame is saved here, it must be
	 * released on IO request completion.
	 *
	 * NOTE(review): the original comment described this field as a
	 * pointer tested against NULL, but it is a u32 index -- confirm the
	 * sentinel value used for "no saved frame" against the users.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/**
	 * This field specifies the data necessary to manage the sub-state
	 * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state.
	 */
	struct sci_base_state_machine started_substate_machine;

	/**
	 * This field specifies the current state handlers in place for this
	 * IO Request object.  This field is updated each time the request
	 * changes state.
	 */
	const struct scic_sds_io_request_state_handler *state_handlers;

	/**
	 * This field is the recorded device sequence for the io request.
	 * This is recorded during the build operation and is compared in the
	 * start operation.  If the sequence is different then there was a
	 * change of devices from the build to start operations.
	 */
	u8 device_sequence;

	/* Protocol-specific payload area; being a union, only one member is
	 * meaningful at a time (selected by the protocol field above).
	 */
	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;
		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;
		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};
  196. static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
  197. {
  198. struct scic_sds_request *sci_req;
  199. sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
  200. return sci_req;
  201. }
struct isci_request {
	/* Lifecycle state; written under state_lock (see
	 * isci_request_change_state()), though some readers peek without it.
	 */
	enum isci_request_status status;
	/* Discriminator for the ttype_ptr union below. */
	enum task_type ttype;
	unsigned short io_tag;
	/* NOTE(review): presumably set once the target has completed the
	 * I/O -- confirm against the users of this flag.
	 */
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	/* Protects the status field. */
	spinlock_t state_lock;
	/* DMA address of this request's own allocation; used by the
	 * virt<->dma helpers below.
	 */
	dma_addr_t request_daddr;
	/* Single-buffer DMA mapping used when task->num_scatter == 0
	 * (see isci_request_unmap_sgl()).
	 */
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;

	/* Embedded core (SCIC) request; see sci_req_to_ireq(). */
	struct scic_sds_request sci;
};
  234. static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
  235. {
  236. struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
  237. return ireq;
  238. }
/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 */
enum sci_base_request_states {
	/**
	 * Simply the initial state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/**
	 * This state indicates that the request has been constructed.  This
	 * state is entered from the INITIAL state.
	 */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/**
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_BASE_REQUEST_STATE_STARTED,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/**
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,

	/**
	 * This state indicates that the request has completed.
	 * This state is entered from the STARTED state.  This state is
	 * entered from the ABORTING state.
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/**
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.
	 * This state is entered from the CONSTRUCTED state.
	 * This state is entered from the STARTED state.
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/**
	 * Simply the final state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_FINAL,
};
/* Handler signature for start/abort/complete operations on a request. */
typedef enum sci_status (*scic_sds_io_request_handler_t)
	(struct scic_sds_request *request);
/* Handler for a received unsolicited frame; @frame is the frame index
 * (see scic_sds_io_request_frame_handler()).
 */
typedef enum sci_status (*scic_sds_io_request_frame_handler_t)
	(struct scic_sds_request *req, u32 frame);
/* Handler for an event notification; @event is the raw event code. */
typedef enum sci_status (*scic_sds_io_request_event_handler_t)
	(struct scic_sds_request *req, u32 event);
/* Handler for a task context completion; @completion_code is the raw SCU
 * completion code.
 */
typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t)
	(struct scic_sds_request *req, u32 completion_code);
/**
 * struct scic_sds_io_request_state_handler - This is the SDS core definition
 *    of the state handlers.  One instance exists per request state; the
 *    active set is held in scic_sds_request.state_handlers.
 */
struct scic_sds_io_request_state_handler {
	/**
	 * The start_handler specifies the method invoked when a user
	 * attempts to start a request.
	 */
	scic_sds_io_request_handler_t start_handler;

	/**
	 * The abort_handler specifies the method invoked when a user
	 * attempts to abort a request.
	 */
	scic_sds_io_request_handler_t abort_handler;

	/**
	 * The complete_handler specifies the method invoked when a user
	 * attempts to complete a request (see scic_sds_request_complete()).
	 */
	scic_sds_io_request_handler_t complete_handler;

	/* Invoked on a task context completion notification. */
	scic_sds_io_request_task_completion_handler_t tc_completion_handler;
	/* Invoked when an event is delivered for this request. */
	scic_sds_io_request_event_handler_t event_handler;
	/* Invoked when an unsolicited frame arrives for this request. */
	scic_sds_io_request_frame_handler_t frame_handler;
};
/* Substate table for the STARTED task management substate machine. */
extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[];

/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This macro returns the request's hardware task context buffer pointer.
 * (The original comment called this an "os handle", but it plainly returns
 * task_context_buffer.)
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)
  360. /**
  361. * scic_sds_request_set_status() -
  362. *
  363. * This macro will set the scu hardware status and sci request completion
  364. * status for an io request.
  365. */
  366. #define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
  367. { \
  368. (request)->scu_status = (scu_status_code); \
  369. (request)->sci_status = (sci_status_code); \
  370. }
/* Invoke the current state's complete handler for @a_request; evaluates to
 * the handler's enum sci_status result.
 */
#define scic_sds_request_complete(a_request) \
	((a_request)->state_handlers->complete_handler(a_request))

/* Dispatch a raw SCU task context completion code to the request's
 * tc_completion handler.
 */
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code);
  375. /**
  376. * SCU_SGL_ZERO() -
  377. *
  378. * This macro zeros the hardware SGL element data
  379. */
  380. #define SCU_SGL_ZERO(scu_sge) \
  381. { \
  382. (scu_sge).length = 0; \
  383. (scu_sge).address_lower = 0; \
  384. (scu_sge).address_upper = 0; \
  385. (scu_sge).address_modifier = 0; \
  386. }
  387. /**
  388. * SCU_SGL_COPY() -
  389. *
  390. * This macro copys the SGL Element data from the host os to the hardware SGL
  391. * elment data
  392. */
  393. #define SCU_SGL_COPY(scu_sge, os_sge) \
  394. { \
  395. (scu_sge).length = sg_dma_len(sg); \
  396. (scu_sge).address_upper = \
  397. upper_32_bits(sg_dma_address(sg)); \
  398. (scu_sge).address_lower = \
  399. lower_32_bits(sg_dma_address(sg)); \
  400. (scu_sge).address_modifier = 0; \
  401. }
/* Build the hardware scatter-gather list for @sci_req. */
void scic_sds_request_build_sgl(struct scic_sds_request *sci_req);
void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req);
void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req);
/* Start a constructed request on the hardware. */
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
/* Terminate an in-flight I/O request. */
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
/* Dispatch an event code / unsolicited frame to the request's current
 * state handlers.
 */
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code);
enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index);
/* Terminate an in-flight task management request. */
enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req);
/**
 * enum scic_sds_smp_request_started_substates - This enumeration depicts all
 *    of the substates for a SMP request to be performed in the STARTED
 *    super-state.
 */
enum scic_sds_smp_request_started_substates {
	/**
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. the SMP response).
	 * (The original comment here was copy-pasted from the task
	 * management substates and incorrectly said "task management
	 * request".)
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,
};
  433. /* XXX open code in caller */
  434. static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
  435. dma_addr_t phys_addr)
  436. {
  437. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  438. dma_addr_t offset;
  439. BUG_ON(phys_addr < ireq->request_daddr);
  440. offset = phys_addr - ireq->request_daddr;
  441. BUG_ON(offset >= sizeof(*ireq));
  442. return (char *)ireq + offset;
  443. }
  444. /* XXX open code in caller */
  445. static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
  446. void *virt_addr)
  447. {
  448. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  449. char *requested_addr = (char *)virt_addr;
  450. char *base_addr = (char *)ireq;
  451. BUG_ON(requested_addr < base_addr);
  452. BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
  453. return ireq->request_daddr + (requested_addr - base_addr);
  454. }
/**
 * isci_request_get_state() - This function gets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object; must be
 *    non-NULL.
 *
 * Return: status of the object as an isci_request_status enum.
 *
 * Note: status is read without taking state_lock, so the returned value may
 * be stale by the time the caller inspects it.
 */
static inline
enum isci_request_status isci_request_get_state(
	struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* Querying a request that is still "unallocated" is
	 * probably a bad sign...
	 */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}
  473. /**
  474. * isci_request_change_state() - This function sets the status of the request
  475. * object.
  476. * @request: This parameter points to the isci_request object
  477. * @status: This Parameter is the new status of the object
  478. *
  479. */
  480. static inline enum isci_request_status isci_request_change_state(
  481. struct isci_request *isci_request,
  482. enum isci_request_status status)
  483. {
  484. enum isci_request_status old_state;
  485. unsigned long flags;
  486. dev_dbg(&isci_request->isci_host->pdev->dev,
  487. "%s: isci_request = %p, state = 0x%x\n",
  488. __func__,
  489. isci_request,
  490. status);
  491. BUG_ON(isci_request == NULL);
  492. spin_lock_irqsave(&isci_request->state_lock, flags);
  493. old_state = isci_request->status;
  494. isci_request->status = status;
  495. spin_unlock_irqrestore(&isci_request->state_lock, flags);
  496. return old_state;
  497. }
/**
 * isci_request_change_started_to_newstate() - This function conditionally
 *    sets the status of the request object: the transition is applied only
 *    if the request is currently "started" or "aborting"; otherwise the
 *    request is left untouched.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: saved into io_request_completion (only on a successful
 *    transition) and signalled when the request completes
 * @newstate: This parameter is the new status of the object
 *
 * Return: the state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_newstate(
	struct isci_request *isci_request,
	struct completion *completion_ptr,
	enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		/* A completion must not already be registered. */
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}

	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}
  528. /**
  529. * isci_request_change_started_to_aborted() - This function sets the status of
  530. * the request object.
  531. * @request: This parameter points to the isci_request object
  532. * @completion_ptr: This parameter is saved as the kernel completion structure
  533. * signalled when the old request completes.
  534. *
  535. * state previous to any change.
  536. */
  537. static inline enum isci_request_status isci_request_change_started_to_aborted(
  538. struct isci_request *isci_request,
  539. struct completion *completion_ptr)
  540. {
  541. return isci_request_change_started_to_newstate(
  542. isci_request, completion_ptr, aborted
  543. );
  544. }
  545. /**
  546. * isci_request_free() - This function frees the request object.
  547. * @isci_host: This parameter specifies the ISCI host object
  548. * @isci_request: This parameter points to the isci_request object
  549. *
  550. */
  551. static inline void isci_request_free(
  552. struct isci_host *isci_host,
  553. struct isci_request *isci_request)
  554. {
  555. if (!isci_request)
  556. return;
  557. /* release the dma memory if we fail. */
  558. dma_pool_free(isci_host->dma_pool, isci_request,
  559. isci_request->request_daddr);
  560. }
/* #define ISCI_REQUEST_VALIDATE_ACCESS
 */
#ifdef ISCI_REQUEST_VALIDATE_ACCESS

/* Checked accessor: BUG if the request is not an io_task. */
static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

/* Checked accessor: BUG if the request is not a tmf_task. */
static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else /* not ISCI_REQUEST_VALIDATE_ACCESS */

/* Unchecked accessors: callers are responsible for knowing the request's
 * ttype before reading the union.
 */
#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
/* Allocate an isci_request for the given task management function;
 * *isci_request receives the new request on success.
 */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);

/* Build and submit an isci_request for the given sas_task; *request
 * receives the new request on success.
 */
int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);
/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *    sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_device struct for the controller
 *
 * ATA/ATAPI protocol tasks are skipped entirely; otherwise either the single
 * zero-scatter mapping or the full scatterlist is unmapped, mirroring
 * however the request was mapped.
 */
static inline void isci_request_unmap_sgl(
	struct isci_request *request,
	struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n ",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	/* NOTE(review): data_dir is compared against PCI_DMA_NONE although
	 * the dma_unmap_* calls below take a dma_data_direction -- the
	 * numeric values historically coincide, but confirm/convert to
	 * DMA_NONE.
	 */
	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir
				);
		else /* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir
				);
	}
}
/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *    core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *    core for this request, or NULL on the first call.
 *
 * Return: pointer to the next sge for the specified request, or NULL once
 * the list is exhausted.
 */
static inline void *isci_request_io_request_get_next_sge(
	struct isci_request *request,
	void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through.. */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0)	/* Next element, if num_scatter == 0 */
		ret = NULL;	/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}
/* Terminate the outstanding requests on @isci_device, moving each to
 * @new_request_state.
 */
void isci_terminate_pending_requests(struct isci_host *isci_host,
				     struct isci_remote_device *isci_device,
				     enum isci_request_status new_request_state);

/* Core-level constructors for task management, SSP/SATA TMF, and SMP
 * requests.
 */
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag,
					    struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req);

/* Set the NCQ tag for an STP/SATA request. */
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
/* Copy the received SMP response into the request's response buffer. */
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
  681. #endif /* !defined(_ISCI_REQUEST_H_) */