qla_inline.h

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HBA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

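/*
 * Worked example: the command IOCB holds one data segment descriptor
 * and each Continuation Type 1 IOCB holds five more, so:
 *
 *	dsds = 1  -> 1 IOCB  (command entry only)
 *	dsds = 6  -> 2 IOCBs (1 + 5/5)
 *	dsds = 12 -> 4 IOCBs (1 + 11/5 = 3, plus 1 for the remainder)
 */
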
/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = RD_REG_WORD(addr);
		barrier();
		cpu_relax();
		second = RD_REG_WORD(addr);
	} while (first != second);

	return (first);
}

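/*
 * A minimal usage sketch (an assumption; ISP_REQ_Q_OUT() is the
 * request-queue out-pointer accessor from qla_def.h): the double read
 * above guards against sampling a register word while the ISP is
 * updating it, so a caller treats the helper as a plain read:
 *
 *	cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
 */
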
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	unsigned long flags;
	struct qla_hw_data *ha = rsp->hw;

	local_irq_save(flags);
	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
	local_irq_restore(flags);
}

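/*
 * Note on the polled path (an inference, not stated in the source):
 * qla2x00_poll() invokes the chip's interrupt handler directly with
 * local interrupts disabled, letting the driver reap response-queue
 * completions when it cannot wait for a hardware interrupt, e.g. while
 * a mailbox command runs in polled mode.
 */
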
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

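/*
 * Worked example: the swap is done in place, one 32-bit word at a
 * time, so bytes 00 01 02 03 04 05 06 07 become 03 02 01 00 07 06 05 04.
 * @bsize is assumed to be a multiple of 4; the bsize >> 2 loop leaves
 * any trailing 1-3 bytes untouched.
 */
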
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*odest++ = cpu_to_le32(*isrc++);
}

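/*
 * Note (derived from the code above): unlike host_to_fcp_swap(), this
 * writes into a separate destination and stores each word with
 * cpu_to_le32(), so it byte-swaps only on big-endian hosts; on
 * little-endian hosts it reduces to a straight 32-bit copy.
 */
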
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
	int i;

	if (IS_FWI2_CAPABLE(ha))
		return;
	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
		set_bit(i, ha->loop_id_map);
	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
	set_bit(BROADCAST, ha->loop_id_map);
}

static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return (loop_id > NPH_LAST_HANDLE);
	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

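/*
 * A minimal sketch (an assumption modeled on the driver's loop-ID
 * allocator; LOOPID_MAP_SIZE and QLA_FUNCTION_FAILED come from
 * qla_def.h) of how the reserved bits are honored when assigning a
 * fresh loop ID:
 *
 *	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
 *	    LOOPID_MAP_SIZE);
 *	if (dev->loop_id >= LOOPID_MAP_SIZE)
 *		return QLA_FUNCTION_FAILED;
 *	set_bit(dev->loop_id, ha->loop_id_map);
 */
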
static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

	/* Free any DSD entries left over from the previous command. */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
	    &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort state transitioned from %s to %s - "
		    "portid=%02x%02x%02x.\n",
		    port_state_str[old_state], port_state_str[state],
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}
}

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

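/*
 * Summary of the switch above: the ql2xenablehba_err_chk module
 * parameter gates HBA-side protection checking by operation:
 *
 *	READ_INSERT / WRITE_STRIP   always checked
 *	READ_STRIP  / WRITE_INSERT  checked when the parameter is >= 1
 *	READ_PASS   / WRITE_PASS    checked when the parameter is >= 2
 */
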
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(ha->srb_mempool, flag);
	if (!sp)
		goto done;

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

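/*
 * A hedged usage sketch (an assumption; SRB_LOGIN_CMD is one of the sp
 * types real callers assign): qla2x00_get_sp() pairs with
 * qla2x00_rel_sp() so the vha busy mark stays balanced:
 *
 *	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	if (!sp)
 *		return -ENOMEM;
 *	sp->type = SRB_LOGIN_CMD;
 *	...
 *	qla2x00_rel_sp(vha, sp);
 */
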
static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
	mempool_free(sp, vha->hw->srb_mempool);
	QLA_VHA_MARK_NOT_BUSY(vha);
}

static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
	init_timer(&sp->u.iocb_cmd.timer);
	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
	add_timer(&sp->u.iocb_cmd.timer);
	sp->free = qla2x00_sp_free;
	if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
	    (sp->type == SRB_FXIOCB_DCMD))
		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}

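/*
 * Note (derived from the code above): @tmo is in seconds and is
 * converted to jiffies via tmo * HZ, so qla2x00_init_timer(sp, 10)
 * arms a 10-second timer that calls qla2x00_sp_timeout() with the sp
 * pointer when it expires.
 */
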
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
{
	if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
		return;

	/* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
	if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
	    HOST_QUEUE_RAMPDOWN_INTERVAL)))
		return;

	/* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
	if (time_before(jiffies, (vha->hw->host_last_rampup_time +
	    HOST_QUEUE_RAMPUP_INTERVAL)))
		return;

	set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}
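
/*
 * Note (derived from the code above, caller context inferred): this is
 * typically invoked from the interrupt path with the mailbox status.
 * If a thread is sleeping on ha->mbx_intr_comp waiting for a mailbox
 * command, the MBX_INTERRUPT status promotes MBX_INTR_WAIT to
 * MBX_INTERRUPT and complete() wakes the waiter.
 */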