/* request.h */
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #ifndef _ISCI_REQUEST_H_
  56. #define _ISCI_REQUEST_H_
  57. #include "isci.h"
  58. #include "host.h"
  59. #include "scu_task_context.h"
  60. #include "stp_request.h"
/**
 * enum isci_request_status - possible lifecycle states of an I/O request,
 *    as tracked in isci_request.status (guarded by isci_request.state_lock).
 */
enum isci_request_status {
	unallocated = 0x00,	/* request object not yet handed out */
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};
/* Discriminator for the ttype_ptr union in struct isci_request. */
enum task_type {
	io_task = 0,	/* ttype_ptr.io_task_ptr is valid */
	tmf_task = 1	/* ttype_ptr.tmf_task_ptr is valid */
};
  81. enum sci_request_protocol {
  82. SCIC_NO_PROTOCOL,
  83. SCIC_SMP_PROTOCOL,
  84. SCIC_SSP_PROTOCOL,
  85. SCIC_STP_PROTOCOL
  86. }; /* XXX remove me, use sas_task.dev instead */;
struct scic_sds_request {
	/*
	 * Base request state machine for this IO request (states listed in
	 * enum sci_base_request_states).
	 */
	struct sci_base_state_machine state_machine;

	/* Controller with which this IO request is associated. */
	struct scic_sds_controller *owning_controller;

	/* Remote device to which this IO request is directed. */
	struct scic_sds_remote_device *target_device;

	/*
	 * True when the SCI user manages the IO tag for this request,
	 * false when the core manages it.
	 */
	bool was_tag_assigned_by_user;

	/*
	 * IO tag for this request: the task_index plus a sequence count;
	 * the sequence count helps identify tasks from one life to another.
	 */
	u16 io_tag;

	/* Transport protocol being utilized for this IO request. */
	enum sci_request_protocol protocol;

	/*
	 * Completion status taken from the SCU's completion code: the
	 * hardware's view of the result.
	 */
	u32 scu_status;

	/* Completion status returned to the SCI user. */
	u32 sci_status;

	/*
	 * Value used when posting (e.g. Post_TC, Post_TC_Abort) this
	 * request to the silicon.
	 */
	u32 post_context;

	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/* True for task management requests, false for normal IO. */
	bool is_task_management_request;

	/*
	 * True when started_substate_machine below has been initialized.
	 */
	bool has_started_substate_machine;

	/*
	 * Index of the saved rx frame; used in STP internal requests and
	 * SMP response frames.  The saved frame must be released on IO
	 * request completion.
	 *
	 * NOTE(review): the historical comment described this field as a
	 * pointer and keyed validity off non-NULL, but it is a u32 frame
	 * index -- confirm the sentinel value used by callers.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/*
	 * Sub-state machine executed while in the
	 * SCI_BASE_REQUEST_STATE_STARTED state.
	 */
	struct sci_base_state_machine started_substate_machine;

	/*
	 * Current state handler table for this IO Request object; updated
	 * each time the request changes state.
	 */
	const struct scic_sds_io_request_state_handler *state_handlers;

	/*
	 * Device sequence recorded during the build operation and compared
	 * in the start operation; a difference means the device changed
	 * between build and start.
	 */
	u8 device_sequence;

	/* Per-protocol command/response buffers. */
	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};
  196. static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
  197. {
  198. struct scic_sds_request *sci_req;
  199. sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
  200. return sci_req;
  201. }
struct isci_request {
	/* Lifecycle state; guarded by state_lock. */
	enum isci_request_status status;
	/* Discriminator for the ttype_ptr union below. */
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	/* Protects status transitions (see isci_request_change_state()). */
	spinlock_t state_lock;
	/* DMA address of this request object (used to free it back to the
	 * host's dma_pool -- see isci_request_free()).
	 */
	dma_addr_t request_daddr;
	/* Single-buffer DMA address, used when task->num_scatter == 0
	 * (see isci_request_unmap_sgl()).
	 */
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;

	/* Embedded SCI core request; see sci_req_to_ireq(). */
	struct scic_sds_request sci;
};
  234. static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
  235. {
  236. struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
  237. return ireq;
  238. }
/**
 * enum sci_base_request_states - all the states of the common request
 *    state machine (see scic_sds_request.state_machine).
 */
enum sci_base_request_states {
	/* Initial state for the base request state machine. */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/* The request has been constructed; entered from INITIAL. */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/* The request has been started; entered from CONSTRUCTED. */
	SCI_BASE_REQUEST_STATE_STARTED,

	/*
	 * The request has completed; entered from STARTED or from
	 * ABORTING.
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/*
	 * The request is being terminated/aborted; entered from
	 * CONSTRUCTED or from STARTED.
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/* Final state for the base request state machine. */
	SCI_BASE_REQUEST_STATE_FINAL,
};
/* Handler invoked with only the request (start/abort/complete). */
typedef enum sci_status (*scic_sds_io_request_handler_t)
	(struct scic_sds_request *request);
/* Handler for an unsolicited frame notification. */
typedef enum sci_status (*scic_sds_io_request_frame_handler_t)
	(struct scic_sds_request *req, u32 frame);
/* Handler for an event notification. */
typedef enum sci_status (*scic_sds_io_request_event_handler_t)
	(struct scic_sds_request *req, u32 event);
/* Handler for a task-context completion code from the hardware. */
typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t)
	(struct scic_sds_request *req, u32 completion_code);
/**
 * struct scic_sds_io_request_state_handler - SDS core per-state handler
 *    table for an IO request.  The active table is selected through
 *    scic_sds_request.state_handlers on each state change.
 */
struct scic_sds_io_request_state_handler {
	/* Invoked when a user attempts to start a request. */
	scic_sds_io_request_handler_t start_handler;

	/* Invoked when a user attempts to abort a request. */
	scic_sds_io_request_handler_t abort_handler;

	/* Invoked when a user attempts to complete a request. */
	scic_sds_io_request_handler_t complete_handler;

	/* Invoked on a task-context completion notification. */
	scic_sds_io_request_task_completion_handler_t tc_completion_handler;
	/* Invoked on an event notification. */
	scic_sds_io_request_event_handler_t event_handler;
	/* Invoked on an unsolicited frame notification. */
	scic_sds_io_request_frame_handler_t frame_handler;
};

/* State table for the started task management substate machine. */
extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[];
/**
 * scic_sds_request_get_controller() - return the controller for this io
 *    request object.
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() - return the remote device for this io
 *    request object.
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() - return the port of this io request's
 *    target device.
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() - return the constructed post
 *    context for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() - return the task context buffer for
 *    this request.  NOTE(review): the historical comment called this the
 *    "os handle"; it actually returns task_context_buffer.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)
  348. /**
  349. * scic_sds_request_set_status() -
  350. *
  351. * This macro will set the scu hardware status and sci request completion
  352. * status for an io request.
  353. */
  354. #define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
  355. { \
  356. (request)->scu_status = (scu_status_code); \
  357. (request)->sci_status = (sci_status_code); \
  358. }
/*
 * scic_sds_request_complete() - invoke the request's current complete
 * handler; evaluates to its enum sci_status result.
 */
#define scic_sds_request_complete(a_request) \
	((a_request)->state_handlers->complete_handler(a_request))

extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code);
  363. /**
  364. * SCU_SGL_ZERO() -
  365. *
  366. * This macro zeros the hardware SGL element data
  367. */
  368. #define SCU_SGL_ZERO(scu_sge) \
  369. { \
  370. (scu_sge).length = 0; \
  371. (scu_sge).address_lower = 0; \
  372. (scu_sge).address_upper = 0; \
  373. (scu_sge).address_modifier = 0; \
  374. }
  375. /**
  376. * SCU_SGL_COPY() -
  377. *
  378. * This macro copys the SGL Element data from the host os to the hardware SGL
  379. * elment data
  380. */
  381. #define SCU_SGL_COPY(scu_sge, os_sge) \
  382. { \
  383. (scu_sge).length = sg_dma_len(sg); \
  384. (scu_sge).address_upper = \
  385. upper_32_bits(sg_dma_address(sg)); \
  386. (scu_sge).address_lower = \
  387. lower_32_bits(sg_dma_address(sg)); \
  388. (scu_sge).address_modifier = 0; \
  389. }
/* Build the hardware SGL (sg_table) for the request. */
void scic_sds_request_build_sgl(struct scic_sds_request *sci_req);
void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req);
void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req);
/* Start a constructed request. */
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
/* Terminate an in-flight io request. */
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req);
/* Dispatch an event code to the request's current event handler. */
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
	u32 event_code);
/* Dispatch an unsolicited frame to the request's current frame handler. */
enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
	u32 frame_index);
enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req);
/**
 * enum scic_sds_raw_request_started_task_mgmt_substates - all the substates
 *    for a task management request performed in the STARTED super-state.
 */
enum scic_sds_raw_request_started_task_mgmt_substates {
	/*
	 * The started raw task management request is waiting for the
	 * transmission of the initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/*
	 * The started task management request is waiting for the reception
	 * of an unsolicited frame (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,
};
/**
 * enum scic_sds_smp_request_started_substates - all the substates for a SMP
 *    request performed in the STARTED super-state.
 */
enum scic_sds_smp_request_started_substates {
	/*
	 * The started SMP request is waiting for the reception of an
	 * unsolicited frame (i.e. response IU).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/*
	 * The started SMP request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,
};
  443. /* XXX open code in caller */
  444. static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
  445. dma_addr_t phys_addr)
  446. {
  447. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  448. dma_addr_t offset;
  449. BUG_ON(phys_addr < ireq->request_daddr);
  450. offset = phys_addr - ireq->request_daddr;
  451. BUG_ON(offset >= sizeof(*ireq));
  452. return (char *)ireq + offset;
  453. }
  454. /* XXX open code in caller */
  455. static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
  456. void *virt_addr)
  457. {
  458. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  459. char *requested_addr = (char *)virt_addr;
  460. char *base_addr = (char *)ireq;
  461. BUG_ON(requested_addr < base_addr);
  462. BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
  463. return ireq->request_daddr + (requested_addr - base_addr);
  464. }
  465. /**
  466. * This function gets the status of the request object.
  467. * @request: This parameter points to the isci_request object
  468. *
  469. * status of the object as a isci_request_status enum.
  470. */
  471. static inline
  472. enum isci_request_status isci_request_get_state(
  473. struct isci_request *isci_request)
  474. {
  475. BUG_ON(isci_request == NULL);
  476. /*probably a bad sign... */
  477. if (isci_request->status == unallocated)
  478. dev_warn(&isci_request->isci_host->pdev->dev,
  479. "%s: isci_request->status == unallocated\n",
  480. __func__);
  481. return isci_request->status;
  482. }
  483. /**
  484. * isci_request_change_state() - This function sets the status of the request
  485. * object.
  486. * @request: This parameter points to the isci_request object
  487. * @status: This Parameter is the new status of the object
  488. *
  489. */
  490. static inline enum isci_request_status isci_request_change_state(
  491. struct isci_request *isci_request,
  492. enum isci_request_status status)
  493. {
  494. enum isci_request_status old_state;
  495. unsigned long flags;
  496. dev_dbg(&isci_request->isci_host->pdev->dev,
  497. "%s: isci_request = %p, state = 0x%x\n",
  498. __func__,
  499. isci_request,
  500. status);
  501. BUG_ON(isci_request == NULL);
  502. spin_lock_irqsave(&isci_request->state_lock, flags);
  503. old_state = isci_request->status;
  504. isci_request->status = status;
  505. spin_unlock_irqrestore(&isci_request->state_lock, flags);
  506. return old_state;
  507. }
  508. /**
  509. * isci_request_change_started_to_newstate() - This function sets the status of
  510. * the request object.
  511. * @request: This parameter points to the isci_request object
  512. * @status: This Parameter is the new status of the object
  513. *
  514. * state previous to any change.
  515. */
  516. static inline enum isci_request_status isci_request_change_started_to_newstate(
  517. struct isci_request *isci_request,
  518. struct completion *completion_ptr,
  519. enum isci_request_status newstate)
  520. {
  521. enum isci_request_status old_state;
  522. unsigned long flags;
  523. spin_lock_irqsave(&isci_request->state_lock, flags);
  524. old_state = isci_request->status;
  525. if (old_state == started || old_state == aborting) {
  526. BUG_ON(isci_request->io_request_completion != NULL);
  527. isci_request->io_request_completion = completion_ptr;
  528. isci_request->status = newstate;
  529. }
  530. spin_unlock_irqrestore(&isci_request->state_lock, flags);
  531. dev_dbg(&isci_request->isci_host->pdev->dev,
  532. "%s: isci_request = %p, old_state = 0x%x\n",
  533. __func__,
  534. isci_request,
  535. old_state);
  536. return old_state;
  537. }
  538. /**
  539. * isci_request_change_started_to_aborted() - This function sets the status of
  540. * the request object.
  541. * @request: This parameter points to the isci_request object
  542. * @completion_ptr: This parameter is saved as the kernel completion structure
  543. * signalled when the old request completes.
  544. *
  545. * state previous to any change.
  546. */
  547. static inline enum isci_request_status isci_request_change_started_to_aborted(
  548. struct isci_request *isci_request,
  549. struct completion *completion_ptr)
  550. {
  551. return isci_request_change_started_to_newstate(
  552. isci_request, completion_ptr, aborted
  553. );
  554. }
  555. /**
  556. * isci_request_free() - This function frees the request object.
  557. * @isci_host: This parameter specifies the ISCI host object
  558. * @isci_request: This parameter points to the isci_request object
  559. *
  560. */
  561. static inline void isci_request_free(
  562. struct isci_host *isci_host,
  563. struct isci_request *isci_request)
  564. {
  565. if (!isci_request)
  566. return;
  567. /* release the dma memory if we fail. */
  568. dma_pool_free(isci_host->dma_pool, isci_request,
  569. isci_request->request_daddr);
  570. }
/* #define ISCI_REQUEST_VALIDATE_ACCESS
 */
#ifdef ISCI_REQUEST_VALIDATE_ACCESS
/* Checked accessors: BUG when the wrong ttype_ptr member is requested. */
static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}
#else /* not ISCI_REQUEST_VALIDATE_ACCESS */
/* Unchecked accessors: the caller must know the request's task type. */
#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)
#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)
#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
/* Build a request object for a task management function (see request.c). */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);

/* Build and submit a request for a sas_task (see request.c). */
int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);
  603. /**
  604. * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
  605. * sgl
  606. * @request: This parameter points to the isci_request object
  607. * @*pdev: This Parameter is the pci_device struct for the controller
  608. *
  609. */
  610. static inline void isci_request_unmap_sgl(
  611. struct isci_request *request,
  612. struct pci_dev *pdev)
  613. {
  614. struct sas_task *task = isci_request_access_task(request);
  615. dev_dbg(&request->isci_host->pdev->dev,
  616. "%s: request = %p, task = %p,\n"
  617. "task->data_dir = %d, is_sata = %d\n ",
  618. __func__,
  619. request,
  620. task,
  621. task->data_dir,
  622. sas_protocol_ata(task->task_proto));
  623. if ((task->data_dir != PCI_DMA_NONE) &&
  624. !sas_protocol_ata(task->task_proto)) {
  625. if (task->num_scatter == 0)
  626. /* 0 indicates a single dma address */
  627. dma_unmap_single(
  628. &pdev->dev,
  629. request->zero_scatter_daddr,
  630. task->total_xfer_len,
  631. task->data_dir
  632. );
  633. else /* unmap the sgl dma addresses */
  634. dma_unmap_sg(
  635. &pdev->dev,
  636. task->scatter,
  637. request->num_sg_entries,
  638. task->data_dir
  639. );
  640. }
  641. }
  642. /**
  643. * isci_request_io_request_get_next_sge() - This function is called by the sci
  644. * core to retrieve the next sge for a given request.
  645. * @request: This parameter is the isci_request object.
  646. * @current_sge_address: This parameter is the last sge retrieved by the sci
  647. * core for this request.
  648. *
  649. * pointer to the next sge for specified request.
  650. */
  651. static inline void *isci_request_io_request_get_next_sge(
  652. struct isci_request *request,
  653. void *current_sge_address)
  654. {
  655. struct sas_task *task = isci_request_access_task(request);
  656. void *ret = NULL;
  657. dev_dbg(&request->isci_host->pdev->dev,
  658. "%s: request = %p, "
  659. "current_sge_address = %p, "
  660. "num_scatter = %d\n",
  661. __func__,
  662. request,
  663. current_sge_address,
  664. task->num_scatter);
  665. if (!current_sge_address) /* First time through.. */
  666. ret = task->scatter; /* always task->scatter */
  667. else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */
  668. ret = NULL; /* there is only one element. */
  669. else
  670. ret = sg_next(current_sge_address); /* sg_next returns NULL
  671. * for the last element
  672. */
  673. dev_dbg(&request->isci_host->pdev->dev,
  674. "%s: next sge address = %p\n",
  675. __func__,
  676. ret);
  677. return ret;
  678. }
/* Move a device's pending requests into new_request_state and terminate
 * them (see the implementation in request.c).
 */
void isci_terminate_pending_requests(struct isci_host *isci_host,
	struct isci_remote_device *isci_device,
	enum isci_request_status new_request_state);
/* SCI core constructors for task management and SMP requests. */
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 io_tag,
	struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req);
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
#endif /* !defined(_ISCI_REQUEST_H_) */