qla_inline.h

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
        volatile uint16_t first;
        volatile uint16_t second;

        do {
                first = RD_REG_WORD(addr);
                barrier();
                cpu_relax();
                second = RD_REG_WORD(addr);
        } while (first != second);

        return (first);
}
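
/*
 * qla2x00_poll
 *      Service a response queue by calling the interrupt handler
 *      directly, with local interrupts disabled; ISP82xx adapters
 *      use their own polling routine.
 *
 * Input:
 *      rsp = response queue.
 */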
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
        unsigned long flags;
        struct qla_hw_data *ha = rsp->hw;

        local_irq_save(flags);
        if (IS_QLA82XX(ha))
                qla82xx_poll(0, rsp);
        else
                ha->isp_ops->intr_handler(0, rsp);
        local_irq_restore(flags);
}
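
/*
 * host_to_fcp_swap
 *      Byte-swap a buffer in place, 32 bits at a time, between host and
 *      FCP (big-endian) byte order.  bsize is assumed to be a multiple
 *      of four; any trailing bytes are left untouched.
 *
 * Input:
 *      fcp = buffer address.
 *      bsize = buffer size in bytes.
 *
 * Returns:
 *      buffer address.
 */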
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
        uint32_t *ifcp = (uint32_t *) fcp;
        uint32_t *ofcp = (uint32_t *) fcp;
        uint32_t iter = bsize >> 2;

        for (; iter; iter--)
                *ofcp++ = swab32(*ifcp++);

        return fcp;
}
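
/*
 * qla2x00_set_reserved_loop_ids
 *      Mark the reserved loop IDs (every ID below SNS_FIRST_LOOP_ID,
 *      plus the management-server and broadcast IDs) as in use in the
 *      loop-ID map.  FWI2-capable adapters manage handles themselves,
 *      so the map is left untouched for them.
 */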
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
        int i;

        if (IS_FWI2_CAPABLE(ha))
                return;

        for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
                set_bit(i, ha->loop_id_map);
        set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
        set_bit(BROADCAST, ha->loop_id_map);
}
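
/*
 * qla2x00_is_reserved_id
 *      Return nonzero if loop_id is one of the reserved handles that
 *      qla2x00_set_reserved_loop_ids() accounts for, or, on
 *      FWI2-capable adapters, lies beyond NPH_LAST_HANDLE.
 */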
static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
        struct qla_hw_data *ha = vha->hw;

        if (IS_FWI2_CAPABLE(ha))
                return (loop_id > NPH_LAST_HANDLE);
        return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
            loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}
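
/*
 * qla2x00_clear_loop_id
 *      Release an fcport's loop ID back to the allocation map, unless
 *      it was never assigned or is a reserved handle.
 */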
static inline void
qla2x00_clear_loop_id(fc_port_t *fcport) {
        struct qla_hw_data *ha = fcport->vha->hw;

        if (fcport->loop_id == FC_NO_LOOP_ID ||
            qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
                return;

        clear_bit(fcport->loop_id, ha->loop_id_map);
        fcport->loop_id = FC_NO_LOOP_ID;
}
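
/*
 * qla2x00_clean_dsd_pool
 *      Free every DSD entry hanging off the command's CRC context and
 *      reinitialize the list head.
 */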
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
        struct dsd_dma *dsd_ptr, *tdsd_ptr;
        struct crc_context *ctx;

        ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

        /* clean up allocated prev pool */
        list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
            &ctx->dsd_list, list) {
                dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
                    dsd_ptr->dsd_list_dma);
                list_del(&dsd_ptr->list);
                kfree(dsd_ptr);
        }
        INIT_LIST_HEAD(&ctx->dsd_list);
}
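
/*
 * qla2x00_set_fcport_state
 *      Atomically update an fcport's state, logging the transition
 *      unless the port is still being initially allocated (old state 0)
 *      or the state is unchanged.
 */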
static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
        int old_state;

        old_state = atomic_read(&fcport->state);
        atomic_set(&fcport->state, state);

        /* Don't print state transitions during initial allocation of fcport */
        if (old_state && old_state != state) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
                    "FCPort state transitioned from %s to %s - "
                    "portid=%02x%02x%02x.\n",
                    port_state_str[old_state], port_state_str[state],
                    fcport->d_id.b.domain, fcport->d_id.b.area,
                    fcport->d_id.b.al_pa);
        }
}
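
/*
 * qla2x00_hba_err_chk_enabled
 *      Decide whether the HBA should perform DIF error checking for
 *      this command, based on the SCSI protection operation and the
 *      ql2xenablehba_err_chk module parameter.
 *
 * Returns:
 *      1 if HBA error checking should be enabled, 0 otherwise.
 */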
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
        /*
         * Uncomment when corresponding SCSI changes are done.
         *
        if (!sp->cmd->prot_chk)
                return 0;
         *
         */
        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
                if (ql2xenablehba_err_chk >= 1)
                        return 1;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (ql2xenablehba_err_chk >= 2)
                        return 1;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                return 1;
        }
        return 0;
}
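
/*
 * qla2x00_reset_active
 *      Return nonzero if an ISP abort/reset is needed, active, or being
 *      retried on this vha or on its physical (base) vha.
 */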
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
        scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

        /* Test appropriate base-vha and vha flags. */
        return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
            test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
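
/*
 * qla2x00_get_sp
 *      Allocate and zero-initialize an srb from the host's mempool,
 *      marking the vha busy for the lifetime of the srb.  Returns NULL
 *      if the vha is being torn down or the mempool is exhausted, in
 *      which case the busy mark is dropped again.
 */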
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
        srb_t *sp = NULL;
        struct qla_hw_data *ha = vha->hw;
        uint8_t bail;

        QLA_VHA_MARK_BUSY(vha, bail);
        if (unlikely(bail))
                return NULL;

        sp = mempool_alloc(ha->srb_mempool, flag);
        if (!sp)
                goto done;

        memset(sp, 0, sizeof(*sp));
        sp->fcport = fcport;
        sp->iocbs = 1;
done:
        if (!sp)
                QLA_VHA_MARK_NOT_BUSY(vha);
        return sp;
}
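
/*
 * qla2x00_rel_sp
 *      Counterpart to qla2x00_get_sp(): return the srb to the mempool
 *      and drop the vha busy reference taken at allocation.
 *
 *      Sketch of the intended pairing (error handling elided, caller
 *      context hypothetical):
 *
 *              sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *              if (!sp)
 *                      return -ENOMEM;
 *              ...issue the IOCB...
 *              qla2x00_rel_sp(vha, sp);
 */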
static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
        mempool_free(sp, vha->hw->srb_mempool);
        QLA_VHA_MARK_NOT_BUSY(vha);
}
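
/*
 * qla2x00_init_timer
 *      Arm the srb's command timer to invoke qla2x00_sp_timeout() after
 *      tmo seconds, and set qla2x00_sp_free() as the srb's free routine.
 */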
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
        init_timer(&sp->u.iocb_cmd.timer);
        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
        sp->u.iocb_cmd.timer.data = (unsigned long)sp;
        sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
        add_timer(&sp->u.iocb_cmd.timer);
        sp->free = qla2x00_sp_free;
}
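
/*
 * qla2x00_gid_list_size
 *      Size in bytes of a GID list buffer holding one gid_list_info
 *      entry per supported fibre device.
 */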
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
        return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
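
/*
 * qla2x00_do_host_ramp_up
 *      Request a queue-depth ramp up (via the HOST_RAMP_UP_QUEUE_DEPTH
 *      dpc flag) once the configured LUN queue depth is below
 *      ql2xmaxqdepth and both the rampdown and rampup hold-off
 *      intervals have expired.
 */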
static inline void
qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
{
        if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
                return;

        /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
        if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
            HOST_QUEUE_RAMPDOWN_INTERVAL)))
                return;

        /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
        if (time_before(jiffies, (vha->hw->host_last_rampup_time +
            HOST_QUEUE_RAMPUP_INTERVAL)))
                return;

        set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}