ufshcd.c

/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 */

#include "ufshcd.h"

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

/* Interrupt aggregation options */
enum {
	INT_AGGR_RESET,
	INT_AGGR_CONFIG,
};

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @reg_hcs: host controller status register value
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(u32 reg_hcs)
{
	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return task_req_descp->header.dword_2 & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 *
 * Returns maximum number of task management request slots in case of
 * task management queue full or returns the free slot number
 */
static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
{
	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
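	/*
	 * UTRLCLR uses inverted semantics: a slot is cleared by writing
	 * '0' to its bit position, which is why the single-bit mask is
	 * complemented before the register write below.
	 */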
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 *	Bit	Description
	 *	 0	Device Present
	 *	 1	UTRLRDY
	 *	 2	UTMRLRDY
	 *	 3	UCRDY
	 *	 4	HEI
	 *	 5	DEI
	 *	6-7	reserved
	 */
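	/*
	 * Drop the Device Present bit and XOR with 0x07: the result is
	 * zero only when UTRLRDY, UTMRLRDY and UCRDY are all set while
	 * HEI and DEI are clear.
	 */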
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_free_hba_memory - Free allocated memory for LRB, request
 *			    and task lists
 * @hba: Pointer to adapter instance
 */
static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	kfree(hba->lrb);

	if (hba->utmrdl_base_addr) {
		utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
		dma_free_coherent(hba->dev, utmrdl_size,
				  hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
	}

	if (hba->utrdl_base_addr) {
		utrdl_size =
			(sizeof(struct utp_transfer_req_desc) * hba->nutrs);
		dma_free_coherent(hba->dev, utrdl_size,
				  hba->utrdl_base_addr, hba->utrdl_dma_addr);
	}

	if (hba->ucdl_base_addr) {
		ucdl_size =
			(sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
		dma_free_coherent(hba->dev, ucdl_size,
				  hba->ucdl_base_addr, hba->ucdl_dma_addr);
	}
}

/**
 * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function checks the response UPIU for valid transaction type in
 * response field
 * Returns 0 on success, non-zero on failure
 */
static inline int
ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
		 UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/**
 * ufshcd_config_int_aggr - Configure interrupt aggregation values.
 *		Currently there is no use case where we want to configure
 *		interrupt aggregation dynamically. So to configure interrupt
 *		aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
 *		INT_AGGR_TIMEOUT_VALUE are used.
 * @hba: per adapter instance
 * @option: Interrupt aggregation option
 */
static inline void
ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
{
	switch (option) {
	case INT_AGGR_RESET:
		ufshcd_writel(hba, INT_AGGR_ENABLE |
			      INT_AGGR_COUNTER_AND_TIMER_RESET,
			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
		break;
	case INT_AGGR_CONFIG:
		ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
			      INT_AGGR_COUNTER_THRESHOLD_VALUE |
			      INT_AGGR_TIMEOUT_VALUE,
			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
		break;
	}
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *			When the run-stop registers are set to 1, the host
 *			controller can start processing requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
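	/*
	 * Both outstanding_reqs and the doorbell write are serialized by
	 * hba->host->host_lock; the caller is expected to hold it across
	 * this call, as ufshcd_queuecommand() does.
	 */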
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
		memcpy(lrbp->sense_buffer,
		       lrbp->ucd_rsp_ptr->sense_data,
		       min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_send_uic_command - Send UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmnd: UIC command
 */
static inline void
ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
{
	/* Write Args */
	ufshcd_writel(hba, uic_cmnd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmnd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmnd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmnd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
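			/*
			 * The PRDT data byte count field is zero based, so
			 * each entry is programmed with (segment length - 1).
			 */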
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_int_config - enable/disable interrupts
 * @hba: per adapter instance
 * @option: interrupt option
 */
static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
{
	switch (option) {
	case UFSHCD_INT_ENABLE:
		ufshcd_writel(hba, hba->int_enable_mask, REG_INTERRUPT_ENABLE);
		break;
	case UFSHCD_INT_DISABLE:
		if (hba->ufs_version == UFSHCI_VERSION_10)
			ufshcd_writel(hba, INTERRUPT_DISABLE_MASK_10,
				      REG_INTERRUPT_ENABLE);
		else
			ufshcd_writel(hba, INTERRUPT_DISABLE_MASK_11,
				      REG_INTERRUPT_ENABLE);
		break;
	}
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @lrbp: pointer to local reference block
 */
static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_transfer_req_desc *req_desc;
	struct utp_upiu_cmd *ucd_cmd_ptr;
	u32 data_direction;
	u32 upiu_flags;

	ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
	req_desc = lrbp->utr_descriptor_ptr;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
			data_direction = UTP_DEVICE_TO_HOST;
			upiu_flags = UPIU_CMD_FLAGS_READ;
		} else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
			data_direction = UTP_HOST_TO_DEVICE;
			upiu_flags = UPIU_CMD_FLAGS_WRITE;
		} else {
			data_direction = UTP_NO_DATA_TRANSFER;
			upiu_flags = UPIU_CMD_FLAGS_NONE;
		}

		/* Transfer request descriptor header fields */
		req_desc->header.dword_0 =
			cpu_to_le32(data_direction | UTP_SCSI_COMMAND);

		/*
		 * assigning invalid value for command status. Controller
		 * updates OCS on command completion, with the command
		 * status
		 */
		req_desc->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

		/* command descriptor fields */
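		/*
		 * UPIU_HEADER_DWORD() packs the four header bytes of
		 * DWORD 0 (transaction type, flags, LUN and task tag)
		 * into a single 32-bit value, which is stored big-endian
		 * as the UPIU layout requires.
		 */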
		ucd_cmd_ptr->header.dword_0 =
			cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
						      upiu_flags,
						      lrbp->lun,
						      lrbp->task_tag));
		ucd_cmd_ptr->header.dword_1 =
			cpu_to_be32(
				UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
						  0,
						  0,
						  0));

		/* Total EHS length and Data segment length will be zero */
		ucd_cmd_ptr->header.dword_2 = 0;

		ucd_cmd_ptr->exp_data_transfer_len =
			cpu_to_be32(lrbp->cmd->sdb.length);

		memcpy(ucd_cmd_ptr->cdb,
		       lrbp->cmd->cmnd,
		       (min_t(unsigned short,
			      lrbp->cmd->cmd_len,
			      MAX_CDB_SIZE)));
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		/* For query function implementation */
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		break;
	} /* end of switch */
}

/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(lrbp);

	err = ufshcd_map_sg(lrbp);
	if (err)
		goto out;

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
						 ucdl_size,
						 &hba->ucdl_dma_addr,
						 GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
						  utrdl_size,
						  &hba->utrdl_dma_addr,
						  GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
						   utmrdl_size,
						   &hba->utmrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	ufshcd_free_hba_memory(hba);
	return -ENOMEM;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				  memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_cmd_ptr =
			(struct utp_upiu_cmd *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command *uic_cmd;
	unsigned long flags;

	/* check if controller is ready to accept UIC commands */
	if ((ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
	     UIC_COMMAND_READY) == 0x0) {
		dev_err(hba->dev,
			"Controller not ready"
			" to accept UIC commands\n");
		return -EIO;
	}

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* form UIC command */
	uic_cmd = &hba->active_uic_cmd;
	uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
	uic_cmd->argument1 = 0;
	uic_cmd->argument2 = 0;
	uic_cmd->argument3 = 0;

	/* enable UIC related interrupts */
	hba->int_enable_mask |= UIC_COMMAND_COMPL;
	ufshcd_int_config(hba, UFSHCD_INT_ENABLE);

	/* sending UIC commands to controller */
	ufshcd_send_uic_command(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Configure run-stop-registers
 * 3. Enable required interrupts
 * 4. Configure interrupt aggregation
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* check if device present */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!ufshcd_is_device_present(reg)) {
		dev_err(hba->dev, "cc: Device not present\n");
		err = -ENXIO;
		goto out;
	}

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

	/* Enable required interrupts */
	hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
				 UIC_ERROR |
				 UTP_TASK_REQ_COMPL |
				 DEVICE_FATAL_ERROR |
				 CONTROLLER_FATAL_ERROR |
				 SYSTEM_BUS_FATAL_ERROR);
	ufshcd_int_config(hba, UFSHCD_INT_ENABLE);

	/* Configure interrupt aggregation */
	ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);

	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		scsi_unblock_requests(hba->host);

	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	scsi_scan_host(hba->host);
out:
	return err;
}

/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}
	return 0;
}

/**
 * ufshcd_initialize_hba - start the initialization process
 * @hba: per adapter instance
 *
 * 1. Enable the controller via ufshcd_hba_enable.
 * 2. Program the Transfer Request List Address with the starting address of
 *    UTRDL.
 * 3. Program the Task Management Request List Address with starting address
 *    of UTMRDL.
 *
 * Returns 0 on success, non-zero value on failure.
 */
static int ufshcd_initialize_hba(struct ufs_hba *hba)
{
	if (ufshcd_hba_enable(hba))
		return -EIO;

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/* Initialize unipro link startup procedure */
	return ufshcd_dme_link_startup(hba);
}

/**
 * ufshcd_do_reset - reset the host controller
 * @hba: per adapter instance
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_do_reset(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	unsigned long flags;
	int tag;

	/* block commands from midlayer */
	scsi_block_requests(hba->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;

	/* send controller to reset state */
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* abort outstanding commands */
	for (tag = 0; tag < hba->nutrs; tag++) {
		if (test_bit(tag, &hba->outstanding_reqs)) {
			lrbp = &hba->lrb[tag];
			scsi_dma_unmap(lrbp->cmd);
			lrbp->cmd->result = DID_RESET << 16;
			lrbp->cmd->scsi_done(lrbp->cmd);
			lrbp->cmd = NULL;
		}
	}

	/* clear outstanding request/task bit maps */
	hba->outstanding_reqs = 0;
	hba->outstanding_tasks = 0;

	/* start the initialization process */
	if (ufshcd_initialize_hba(hba)) {
		dev_err(hba->dev,
			"Reset: Controller initialization failed\n");
		return FAILED;
	}
	return SUCCESS;
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	sdev->tagged_supported = 1;

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;
	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);

	/*
	 * Inform SCSI Midlayer that the LUN queue depth is same as the
	 * controller queue depth. If a LUN queue depth is less than the
	 * controller queue depth and if the LUN reports
	 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
	 * with scsi_adjust_queue_depth.
	 */
	scsi_activate_tcq(sdev, hba->nutrs);
	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	scsi_deactivate_tcq(sdev, hba->nutrs);
}

/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);

		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
			task_result = FAILED;
		else
			task_result = SUCCESS;
	} else {
		task_result = FAILED;
		dev_err(hba->dev,
			"trc: Invalid ocs = %x\n", ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return task_result;
}

/**
 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
 *			      SAM_STAT_TASK_SET_FULL SCSI command status.
 * @cmd: pointer to SCSI command
 */
static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba;
	int i;
	int lun_qdepth = 0;

	hba = shost_priv(cmd->device->host);

	/*
	 * LUN queue depth can be obtained by counting outstanding commands
	 * on the LUN.
	 */
	for (i = 0; i < hba->nutrs; i++) {
		if (test_bit(i, &hba->outstanding_reqs)) {
			/*
			 * Check if the outstanding command belongs
			 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
			 */
			if (cmd->device->lun == hba->lrb[i].lun)
				lun_qdepth++;
		}
	}

	/*
	 * LUN queue depth will be total outstanding commands, except the
	 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
	 */
	scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;
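
	/*
	 * The SCSI result word handed back to the midlayer is built as
	 * (host_byte << 16) | (msg_byte << 8) | status_byte.
	 */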
	switch (scsi_status) {
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  SAM_STAT_GOOD;
		break;
	case SAM_STAT_CHECK_CONDITION:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  SAM_STAT_CHECK_CONDITION;
		ufshcd_copy_sense_data(lrbp);
		break;
	case SAM_STAT_BUSY:
		result |= SAM_STAT_BUSY;
		break;
	case SAM_STAT_TASK_SET_FULL:
		/*
		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
		 * depth needs to be adjusted to the exact number of
		 * outstanding commands the LUN can handle at any given time.
		 */
		ufshcd_adjust_lun_qdepth(lrbp->cmd);
		result |= SAM_STAT_TASK_SET_FULL;
		break;
	case SAM_STAT_TASK_ABORTED:
		result |= SAM_STAT_TASK_ABORTED;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		/* check if the returned transfer response is valid */
		result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
		if (result) {
			dev_err(hba->dev,
				"Invalid response = %x\n", result);
			break;
		}

		/*
		 * get the response UPIU result to extract
		 * the SCSI command status
		 */
		result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

		/*
		 * get the result based on SCSI status response
		 * to notify the SCSI midlayer of the command status
		 */
		scsi_status = result & MASK_SCSI_STATUS;
		result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrb;
	unsigned long completed_reqs;
	u32 tr_doorbell;
	int result;
	int index;

	lrb = hba->lrb;
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
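
	/*
	 * Bits that are set in outstanding_reqs but already cleared in the
	 * doorbell register identify requests the controller has completed;
	 * the XOR below picks out exactly those bits.
	 */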
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	for (index = 0; index < hba->nutrs; index++) {
		if (test_bit(index, &completed_reqs)) {

			result = ufshcd_transfer_rsp_status(hba, &lrb[index]);

			if (lrb[index].cmd) {
				scsi_dma_unmap(lrb[index].cmd);
				lrb[index].cmd->result = result;
				lrb[index].cmd->scsi_done(lrb[index].cmd);

				/* Mark completed command as NULL in LRB */
				lrb[index].cmd = NULL;
			}
		} /* end of if */
	} /* end of for */

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	/* Reset interrupt aggregation counters */
	ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
}

/**
 * ufshcd_uic_cc_handler - handle UIC command completion
 * @work: pointer to a work queue structure
 */
static void ufshcd_uic_cc_handler(struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(work, struct ufs_hba, uic_workq);

	if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
	    !(ufshcd_get_uic_cmd_result(hba))) {

		if (ufshcd_make_hba_operational(hba))
			dev_err(hba->dev,
				"cc: hba not operational state\n");
		return;
	}
}

/**
 * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to a work queue structure
 */
static void ufshcd_fatal_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(work, struct ufs_hba, feh_workq);

	/* check if reset is already in progress */
	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
		ufshcd_do_reset(hba);
}

/**
 * ufshcd_err_handler - Check for fatal errors
 * @hba: per adapter instance
 */
static void ufshcd_err_handler(struct ufs_hba *hba)
{
	u32 reg;

	if (hba->errors & INT_FATAL_ERRORS)
		goto fatal_eh;

	if (hba->errors & UIC_ERROR) {
		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			goto fatal_eh;
	}
	return;
fatal_eh:
	hba->ufshcd_state = UFSHCD_STATE_ERROR;
	schedule_work(&hba->feh_workq);
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
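	/* completed tasks: outstanding bits whose doorbell bit has cleared */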
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_err_handler(hba);

	if (intr_status & UIC_COMMAND_COMPL)
		schedule_work(&hba->uic_workq);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *	   IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		ufshcd_sl_intr(hba, intr_status);

		/* If UFSHCI 1.0 then clear interrupt status register */
		if (hba->ufs_version == UFSHCI_VERSION_10)
			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}

/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @tm_function: task management function to perform
 *
 * Returns SUCCESS/FAILED
 */
static int
ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		    struct ufshcd_lrb *lrbp,
		    u8 tm_function)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot = 0;
	int err;

	host = hba->host;

	spin_lock_irqsave(host->host_lock, flags);

	/* If task management queue is full */
	free_slot = ufshcd_get_tm_free_slot(hba);
	if (free_slot >= hba->nutmrs) {
		spin_unlock_irqrestore(host->host_lock, flags);
		dev_err(hba->dev, "Task management queue full\n");
		err = FAILED;
		goto out;
	}

	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_req_upiup->header.dword_0 =
		cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
					      lrbp->lun, lrbp->task_tag));
	task_req_upiup->header.dword_1 =
		cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));

	task_req_upiup->input_param1 = lrbp->lun;
	task_req_upiup->input_param1 =
		cpu_to_be32(task_req_upiup->input_param1);
	task_req_upiup->input_param2 = lrbp->task_tag;
	task_req_upiup->input_param2 =
		cpu_to_be32(task_req_upiup->input_param2);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);
	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err =
	wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
					 (test_bit(free_slot,
					 &hba->tm_condition) != 0),
					 60 * HZ);
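
	/*
	 * wait_event_interruptible_timeout() returns 0 only on timeout;
	 * any other return value (remaining jiffies, or a negative value
	 * when interrupted by a signal) proceeds to the completion
	 * handling after the timeout check below.
	 */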
	if (!err) {
		dev_err(hba->dev,
			"Task management command timed-out\n");
		err = FAILED;
		goto out;
	}
	clear_bit(free_slot, &hba->tm_condition);

	err = ufshcd_task_req_compl(hba, free_slot);
out:
	return err;
}

/**
 * ufshcd_device_reset - reset device and abort all the pending commands
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
	if (err == FAILED)
		goto out;

	for (pos = 0; pos < hba->nutrs; pos++) {
		if (test_bit(pos, &hba->outstanding_reqs) &&
		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {

			/* clear the respective UTRLCLR register bit */
			ufshcd_utrl_clear(hba, pos);

			clear_bit(pos, &hba->outstanding_reqs);

			if (hba->lrb[pos].cmd) {
				scsi_dma_unmap(hba->lrb[pos].cmd);
				hba->lrb[pos].cmd->result =
					DID_ABORT << 16;
				/* complete the aborted command, not @cmd */
				hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
				hba->lrb[pos].cmd = NULL;
			}
		}
	} /* end of for */
out:
	return err;
}

/**
 * ufshcd_host_reset - Main reset function registered with scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_host_reset(struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		return SUCCESS;

	return ufshcd_do_reset(hba);
}

/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	spin_lock_irqsave(host->host_lock, flags);

	/* check if command is still pending */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		err = FAILED;
		spin_unlock_irqrestore(host->host_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
	if (err == FAILED)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);

	/* clear the respective UTRLCLR register bit */
	ufshcd_utrl_clear(hba, tag);

	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	return err;
}

static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_destroy		= ufshcd_slave_destroy,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_device_reset,
	.eh_host_reset_handler	= ufshcd_host_reset,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
};

/**
 * ufshcd_suspend - suspend power management function
 * @hba: per adapter instance
 * @state: power state
 *
 * Returns -ENOSYS
 */
int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend);

/**
 * ufshcd_resume - resume power management function
 * @hba: per adapter instance
 *
 * Returns -ENOSYS
 */
int ufshcd_resume(struct ufs_hba *hba)
{
	/*
	 * TODO:
	 * 1. Set HCE to 1, to start the UFS host controller
	 *    initialization process
	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
	 * 3. Change the internal driver state to operational
	 * 4. Unblock SCSI requests from SCSI midlayer
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_resume);

/**
 * ufshcd_hba_free - free allocated memory for
 *		     host memory space data structures
 * @hba: per adapter instance
 */
static void ufshcd_hba_free(struct ufs_hba *hba)
{
	iounmap(hba->mmio_base);
	ufshcd_free_hba_memory(hba);
}

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		   data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	/* disable interrupts */
	ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
	ufshcd_hba_stop(hba);

	ufshcd_hba_free(hba);
	scsi_remove_host(hba->host);
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		void __iomem *mmio_base, unsigned int irq)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err;

	if (!dev) {
		dev_err(dev,
			"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	if (!mmio_base) {
		dev_err(dev,
			"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
			       sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);

	hba->host = host;
	hba->dev = dev;
	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFSHCD_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->ufshcd_tm_wait_queue);

	/* Initialize work queues */
	INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);

	/* IRQ registration */
	err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_lrb_free;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto out_free_irq;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto out_free_irq;
	}

	/* Initialization routine */
	err = ufshcd_initialize_hba(hba);
	if (err) {
		dev_err(hba->dev, "Initialization failed\n");
		goto out_remove_scsi_host;
	}
	*hba_handle = hba;

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_free_irq:
	free_irq(irq, hba);
out_lrb_free:
	ufshcd_free_hba_memory(hba);
out_disable:
	scsi_host_put(host);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
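
/*
 * A minimal usage sketch for bus-glue code (hypothetical names, not the
 * actual PCI/platform front end): the glue driver maps the UFSHCI register
 * space, then hands the device, register base and IRQ to ufshcd_init(),
 * and calls ufshcd_remove() on teardown.
 *
 *	static struct ufs_hba *my_hba;
 *
 *	static int my_glue_probe(struct device *dev, void __iomem *mmio_base,
 *				 unsigned int irq)
 *	{
 *		return ufshcd_init(dev, &my_hba, mmio_base, irq);
 *	}
 *
 *	static void my_glue_remove(void)
 *	{
 *		ufshcd_remove(my_hba);
 *	}
 */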

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);