ufshcd.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841
  1. /*
  2. * Universal Flash Storage Host controller driver Core
  3. *
  4. * This code is based on drivers/scsi/ufs/ufshcd.c
  5. * Copyright (C) 2011-2013 Samsung India Software Operations
  6. *
  7. * Authors:
  8. * Santosh Yaraganavi <santosh.sy@samsung.com>
  9. * Vinayak Holikatti <h.vinayak@samsung.com>
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * as published by the Free Software Foundation; either version 2
  14. * of the License, or (at your option) any later version.
  15. * See the COPYING file in the top-level directory or visit
  16. * <http://www.gnu.org/licenses/gpl-2.0.html>
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * This program is provided "AS IS" and "WITH ALL FAULTS" and
  24. * without warranty of any kind. You are solely responsible for
  25. * determining the appropriateness of using and distributing
  26. * the program and assume all risks associated with your exercise
  27. * of rights with respect to the program, including but not limited
  28. * to infringement of third party rights, the risks and costs of
  29. * program errors, damage to or loss of data, programs or equipment,
  30. * and unavailability or interruption of operations. Under no
  31. * circumstances will the contributor of this Program be liable for
  32. * any damages of any kind arising from your use or distribution of
  33. * this program.
  34. */
  35. #include <linux/async.h>
  36. #include "ufshcd.h"
/* Interrupts enabled at initialization: transfer/task-management
 * completion plus all error events. */
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* Limits reported to the SCSI midlayer via the host template */
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

/* Interrupt aggregation options */
enum {
	INT_AGGR_RESET,
	INT_AGGR_CONFIG,
};
  66. /**
  67. * ufshcd_get_intr_mask - Get the interrupt bit mask
  68. * @hba - Pointer to adapter instance
  69. *
  70. * Returns interrupt bit mask per version
  71. */
  72. static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
  73. {
  74. if (hba->ufs_version == UFSHCI_VERSION_10)
  75. return INTERRUPT_MASK_ALL_VER_10;
  76. else
  77. return INTERRUPT_MASK_ALL_VER_11;
  78. }
  79. /**
  80. * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
  81. * @hba - Pointer to adapter instance
  82. *
  83. * Returns UFSHCI version supported by the controller
  84. */
  85. static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
  86. {
  87. return ufshcd_readl(hba, REG_UFS_VERSION);
  88. }
  89. /**
  90. * ufshcd_is_device_present - Check if any device connected to
  91. * the host controller
  92. * @reg_hcs - host controller status register value
  93. *
  94. * Returns 1 if device present, 0 if no device detected
  95. */
  96. static inline int ufshcd_is_device_present(u32 reg_hcs)
  97. {
  98. return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
  99. }
  100. /**
  101. * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
  102. * @lrb: pointer to local command reference block
  103. *
  104. * This function is used to get the OCS field from UTRD
  105. * Returns the OCS field in the UTRD
  106. */
  107. static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
  108. {
  109. return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
  110. }
  111. /**
  112. * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
  113. * @task_req_descp: pointer to utp_task_req_desc structure
  114. *
  115. * This function is used to get the OCS field from UTMRD
  116. * Returns the OCS field in the UTMRD
  117. */
  118. static inline int
  119. ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
  120. {
  121. return task_req_descp->header.dword_2 & MASK_OCS;
  122. }
  123. /**
  124. * ufshcd_get_tm_free_slot - get a free slot for task management request
  125. * @hba: per adapter instance
  126. *
  127. * Returns maximum number of task management request slots in case of
  128. * task management queue full or returns the free slot number
  129. */
  130. static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
  131. {
  132. return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
  133. }
  134. /**
  135. * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
  136. * @hba: per adapter instance
  137. * @pos: position of the bit to be cleared
  138. */
  139. static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
  140. {
  141. ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
  142. }
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 *
	 * Shifting right by one drops Device Present; XOR-ing with 0x07
	 * then yields zero only when UTRLRDY, UTMRLRDY and UCRDY are all
	 * set AND bits 4-7 (HEI, DEI, reserved) are all clear.  Any other
	 * combination produces a non-zero (failure) value.
	 */
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}
  164. /**
  165. * ufshcd_get_uic_cmd_result - Get the UIC command result
  166. * @hba: Pointer to adapter instance
  167. *
  168. * This function gets the result of UIC command completion
  169. * Returns 0 on success, non zero value on error
  170. */
  171. static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
  172. {
  173. return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
  174. MASK_UIC_COMMAND_RESULT;
  175. }
  176. /**
  177. * ufshcd_free_hba_memory - Free allocated memory for LRB, request
  178. * and task lists
  179. * @hba: Pointer to adapter instance
  180. */
  181. static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
  182. {
  183. size_t utmrdl_size, utrdl_size, ucdl_size;
  184. kfree(hba->lrb);
  185. if (hba->utmrdl_base_addr) {
  186. utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
  187. dma_free_coherent(hba->dev, utmrdl_size,
  188. hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
  189. }
  190. if (hba->utrdl_base_addr) {
  191. utrdl_size =
  192. (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
  193. dma_free_coherent(hba->dev, utrdl_size,
  194. hba->utrdl_base_addr, hba->utrdl_dma_addr);
  195. }
  196. if (hba->ucdl_base_addr) {
  197. ucdl_size =
  198. (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
  199. dma_free_coherent(hba->dev, ucdl_size,
  200. hba->ucdl_base_addr, hba->ucdl_dma_addr);
  201. }
  202. }
  203. /**
  204. * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
  205. * @ucd_rsp_ptr: pointer to response UPIU
  206. *
  207. * This function checks the response UPIU for valid transaction type in
  208. * response field
  209. * Returns 0 on success, non-zero on failure
  210. */
  211. static inline int
  212. ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
  213. {
  214. return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
  215. UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
  216. }
  217. /**
  218. * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
  219. * @ucd_rsp_ptr: pointer to response UPIU
  220. *
  221. * This function gets the response status and scsi_status from response UPIU
  222. * Returns the response result code.
  223. */
  224. static inline int
  225. ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
  226. {
  227. return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
  228. }
  229. /**
  230. * ufshcd_config_int_aggr - Configure interrupt aggregation values.
  231. * Currently there is no use case where we want to configure
  232. * interrupt aggregation dynamically. So to configure interrupt
  233. * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
  234. * INT_AGGR_TIMEOUT_VALUE are used.
  235. * @hba: per adapter instance
  236. * @option: Interrupt aggregation option
  237. */
  238. static inline void
  239. ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
  240. {
  241. switch (option) {
  242. case INT_AGGR_RESET:
  243. ufshcd_writel(hba, INT_AGGR_ENABLE |
  244. INT_AGGR_COUNTER_AND_TIMER_RESET,
  245. REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
  246. break;
  247. case INT_AGGR_CONFIG:
  248. ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
  249. INT_AGGR_COUNTER_THRESHOLD_VALUE |
  250. INT_AGGR_TIMEOUT_VALUE,
  251. REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
  252. break;
  253. }
  254. }
  255. /**
  256. * ufshcd_enable_run_stop_reg - Enable run-stop registers,
  257. * When run-stop registers are set to 1, it indicates the
  258. * host controller that it can process the requests
  259. * @hba: per adapter instance
  260. */
  261. static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
  262. {
  263. ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
  264. REG_UTP_TASK_REQ_LIST_RUN_STOP);
  265. ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
  266. REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
  267. }
  268. /**
  269. * ufshcd_hba_start - Start controller initialization sequence
  270. * @hba: per adapter instance
  271. */
  272. static inline void ufshcd_hba_start(struct ufs_hba *hba)
  273. {
  274. ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
  275. }
  276. /**
  277. * ufshcd_is_hba_active - Get controller state
  278. * @hba: per adapter instance
  279. *
  280. * Returns zero if controller is active, 1 otherwise
  281. */
  282. static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
  283. {
  284. return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
  285. }
  286. /**
  287. * ufshcd_send_command - Send SCSI or device management commands
  288. * @hba: per adapter instance
  289. * @task_tag: Task tag of the command
  290. */
  291. static inline
  292. void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  293. {
  294. __set_bit(task_tag, &hba->outstanding_reqs);
  295. ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  296. }
  297. /**
  298. * ufshcd_copy_sense_data - Copy sense data in case of check condition
  299. * @lrb - pointer to local reference block
  300. */
  301. static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
  302. {
  303. int len;
  304. if (lrbp->sense_buffer) {
  305. len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
  306. memcpy(lrbp->sense_buffer,
  307. lrbp->ucd_rsp_ptr->sense_data,
  308. min_t(int, len, SCSI_SENSE_BUFFERSIZE));
  309. }
  310. }
  311. /**
  312. * ufshcd_hba_capabilities - Read controller capabilities
  313. * @hba: per adapter instance
  314. */
  315. static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
  316. {
  317. hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
  318. /* nutrs and nutmrs are 0 based values */
  319. hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
  320. hba->nutmrs =
  321. ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
  322. }
  323. /**
  324. * ufshcd_ready_for_uic_cmd - Check if controller is ready
  325. * to accept UIC commands
  326. * @hba: per adapter instance
  327. * Return true on success, else false
  328. */
  329. static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
  330. {
  331. if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
  332. return true;
  333. else
  334. return false;
  335. }
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Called with uic_cmd_mutex held, and under host_lock (see
 * __ufshcd_send_uic_cmd()), so only one UIC command is in flight at a
 * time.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	/* a previous UIC command must have fully completed */
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args — arguments must be programmed before the opcode,
	 * since writing the command register triggers execution */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command being waited on
 *
 * Must be called with uic_cmd_mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	/*
	 * uic_cmd->done is presumably completed by the UIC interrupt
	 * handler, which also stores the result in argument2 — the
	 * handler is not in this chunk, TODO confirm.
	 */
	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	/* clear the active command under host_lock so it is no longer
	 * visible to concurrent readers of hba->active_uic_cmd */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with uic_cmd_mutex held.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	/* bail out early if the controller cannot take a UIC command */
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	/* completion is signalled asynchronously; init before dispatch */
	init_completion(&uic_cmd->done);

	/* dispatch under host_lock, then wait outside of it */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	return ret;
}
  405. /**
  406. * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  407. * @hba: per adapter instance
  408. * @uic_cmd: UIC command
  409. *
  410. * Returns 0 only if success.
  411. */
  412. static int
  413. ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  414. {
  415. int ret;
  416. mutex_lock(&hba->uic_cmd_mutex);
  417. ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
  418. mutex_unlock(&hba->uic_cmd_mutex);
  419. return ret;
  420. }
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * DMA-maps the command's scatter list and writes one PRD entry per
 * segment into the PRDT of the command descriptor (little-endian, as
 * consumed by the controller).
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
			cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			/* size field holds byte count minus one —
			 * presumably per UFSHCI PRDT layout, TODO confirm
			 * against the spec */
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		/* no data phase: empty PRDT */
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits to enable
 *
 * Enables @intrs in REG_INTERRUPT_ENABLE. UFSHCI 1.0 gets special
 * treatment: only the bits in INTERRUPT_MASK_RW_VER_10 are read/write
 * there, so the current value of those bits is preserved while the
 * requested bits are written.
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		/* keep the current state of the read/write-capable bits */
		rw = set & INTERRUPT_MASK_RW_VER_10;
		/* (set ^ intrs) & intrs == intrs & ~set: write only the
		 * requested bits that are not already reported set */
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits to disable
 *
 * Clears @intrs in REG_INTERRUPT_ENABLE. As in ufshcd_enable_intr(),
 * UFSHCI 1.0 is special: only the INTERRUPT_MASK_RW_VER_10 bits are
 * read/write, so the two bit classes are masked separately before
 * being recombined.
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		/* read/write-capable bits with the requested ones cleared */
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		/* remaining (non-RW) bits, restricted to the request */
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @lrbp - pointer to local reference block
 *
 * Fills in the UTP transfer request descriptor header (little-endian)
 * and the command UPIU (big-endian) for the request described by
 * @lrbp. Only the SCSI command type is implemented here; the
 * device-management and native-UFS cases are placeholders.
 */
static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_transfer_req_desc *req_desc;
	struct utp_upiu_cmd *ucd_cmd_ptr;
	u32 data_direction;
	u32 upiu_flags;

	ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
	req_desc = lrbp->utr_descriptor_ptr;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		/* map the midlayer DMA direction onto UTP direction and
		 * UPIU read/write flags */
		if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
			data_direction = UTP_DEVICE_TO_HOST;
			upiu_flags = UPIU_CMD_FLAGS_READ;
		} else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
			data_direction = UTP_HOST_TO_DEVICE;
			upiu_flags = UPIU_CMD_FLAGS_WRITE;
		} else {
			data_direction = UTP_NO_DATA_TRANSFER;
			upiu_flags = UPIU_CMD_FLAGS_NONE;
		}

		/* Transfer request descriptor header fields */
		req_desc->header.dword_0 =
			cpu_to_le32(data_direction | UTP_SCSI_COMMAND);

		/*
		 * assigning invalid value for command status. Controller
		 * updates OCS on command completion, with the command
		 * status
		 */
		req_desc->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

		/* command descriptor fields (UPIU headers are big-endian) */
		ucd_cmd_ptr->header.dword_0 =
			cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
						upiu_flags,
						lrbp->lun,
						lrbp->task_tag));
		ucd_cmd_ptr->header.dword_1 =
			cpu_to_be32(
			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
					0,
					0,
					0));

		/* Total EHS length and Data segment length will be zero */
		ucd_cmd_ptr->header.dword_2 = 0;

		ucd_cmd_ptr->exp_data_transfer_len =
			cpu_to_be32(lrbp->cmd->sdb.length);

		/* copy the CDB verbatim, truncated to MAX_CDB_SIZE */
		memcpy(ucd_cmd_ptr->cdb,
			lrbp->cmd->cmnd,
			(min_t(unsigned short,
			lrbp->cmd->cmd_len,
			MAX_CDB_SIZE)));
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		/* For query function implementation */
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		break;
	} /* end of switch */
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host the command is issued to
 * @cmd: command from SCSI Midlayer
 *
 * Builds a UPIU in the local reference block indexed by the
 * block-layer request tag, maps the scatter-gather list and rings the
 * transfer request doorbell.
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	/* the block-layer tag doubles as the controller task tag and
	 * as the index into hba->lrb */
	tag = cmd->request->tag;

	/*
	 * NOTE(review): ufshcd_state is read without holding host_lock;
	 * a concurrent state change would only be observed after the
	 * doorbell write — confirm this race is acceptable.
	 */
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;

	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err)
		goto out;

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * On any failure, everything allocated so far is released via
 * ufshcd_free_hba_memory() before returning -ENOMEM.
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
						 ucdl_size,
						 &hba->ucdl_dma_addr,
						 GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
						  utrdl_size,
						  &hba->utrdl_dma_addr,
						  GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
						   utmrdl_size,
						   &hba->utmrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	/* frees only what was actually allocated above */
	ufshcd_free_hba_memory(hba);
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *	address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *	and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *	into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	/* byte offsets of response UPIU and PRDT within one command
	 * descriptor; identical for every slot */
	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
			(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
			cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
			cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
			cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
			cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
			cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		/* cache kernel-virtual pointers in the per-slot LRB */
		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_cmd_ptr =
			(struct utp_upiu_cmd *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}
  723. /**
  724. * ufshcd_dme_link_startup - Notify Unipro to perform link startup
  725. * @hba: per adapter instance
  726. *
  727. * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
  728. * in order to initialize the Unipro link startup procedure.
  729. * Once the Unipro links are up, the device connected to the controller
  730. * is detected.
  731. *
  732. * Returns 0 on success, non-zero value on failure
  733. */
  734. static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  735. {
  736. struct uic_command uic_cmd = {0};
  737. int ret;
  738. uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
  739. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  740. if (ret)
  741. dev_err(hba->dev,
  742. "dme-link-startup: error code %d\n", ret);
  743. return ret;
  744. }
  745. /**
  746. * ufshcd_make_hba_operational - Make UFS controller operational
  747. * @hba: per adapter instance
  748. *
  749. * To bring UFS host controller to operational state,
  750. * 1. Check if device is present
  751. * 2. Enable required interrupts
  752. * 3. Configure interrupt aggregation
  753. * 4. Program UTRL and UTMRL base addres
  754. * 5. Configure run-stop-registers
  755. *
  756. * Returns 0 on success, non-zero value on failure
  757. */
  758. static int ufshcd_make_hba_operational(struct ufs_hba *hba)
  759. {
  760. int err = 0;
  761. u32 reg;
  762. /* check if device present */
  763. reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
  764. if (!ufshcd_is_device_present(reg)) {
  765. dev_err(hba->dev, "cc: Device not present\n");
  766. err = -ENXIO;
  767. goto out;
  768. }
  769. /* Enable required interrupts */
  770. ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
  771. /* Configure interrupt aggregation */
  772. ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
  773. /* Configure UTRL and UTMRL base address registers */
  774. ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
  775. REG_UTP_TRANSFER_REQ_LIST_BASE_L);
  776. ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
  777. REG_UTP_TRANSFER_REQ_LIST_BASE_H);
  778. ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
  779. REG_UTP_TASK_REQ_LIST_BASE_L);
  780. ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
  781. REG_UTP_TASK_REQ_LIST_BASE_H);
  782. /*
  783. * UCRDY, UTMRLDY and UTRLRDY bits must be 1
  784. * DEI, HEI bits must be 0
  785. */
  786. if (!(ufshcd_get_lists_status(reg))) {
  787. ufshcd_enable_run_stop_reg(hba);
  788. } else {
  789. dev_err(hba->dev,
  790. "Host controller not ready to process requests");
  791. err = -EIO;
  792. goto out;
  793. }
  794. if (hba->ufshcd_state == UFSHCD_STATE_RESET)
  795. scsi_unblock_requests(hba->host);
  796. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  797. out:
  798. return err;
  799. }
  800. /**
  801. * ufshcd_hba_enable - initialize the controller
  802. * @hba: per adapter instance
  803. *
  804. * The controller resets itself and controller firmware initialization
  805. * sequence kicks off. When controller is ready it will set
  806. * the Host Controller Enable bit to 1.
  807. *
  808. * Returns 0 on success, non-zero value on failure
  809. */
  810. static int ufshcd_hba_enable(struct ufs_hba *hba)
  811. {
  812. int retry;
  813. /*
  814. * msleep of 1 and 5 used in this function might result in msleep(20),
  815. * but it was necessary to send the UFS FPGA to reset mode during
  816. * development and testing of this driver. msleep can be changed to
  817. * mdelay and retry count can be reduced based on the controller.
  818. */
  819. if (!ufshcd_is_hba_active(hba)) {
  820. /* change controller state to "reset state" */
  821. ufshcd_hba_stop(hba);
  822. /*
  823. * This delay is based on the testing done with UFS host
  824. * controller FPGA. The delay can be changed based on the
  825. * host controller used.
  826. */
  827. msleep(5);
  828. }
  829. /* start controller initialization sequence */
  830. ufshcd_hba_start(hba);
  831. /*
  832. * To initialize a UFS host controller HCE bit must be set to 1.
  833. * During initialization the HCE bit value changes from 1->0->1.
  834. * When the host controller completes initialization sequence
  835. * it sets the value of HCE bit to 1. The same HCE bit is read back
  836. * to check if the controller has completed initialization sequence.
  837. * So without this delay the value HCE = 1, set in the previous
  838. * instruction might be read back.
  839. * This delay can be changed based on the controller.
  840. */
  841. msleep(1);
  842. /* wait for the host controller to complete initialization */
  843. retry = 10;
  844. while (ufshcd_is_hba_active(hba)) {
  845. if (retry) {
  846. retry--;
  847. } else {
  848. dev_err(hba->dev,
  849. "Controller enable failed\n");
  850. return -EIO;
  851. }
  852. msleep(5);
  853. }
  854. return 0;
  855. }
  856. /**
  857. * ufshcd_link_startup - Initialize unipro link startup
  858. * @hba: per adapter instance
  859. *
  860. * Returns 0 for success, non-zero in case of failure
  861. */
  862. static int ufshcd_link_startup(struct ufs_hba *hba)
  863. {
  864. int ret;
  865. /* enable UIC related interrupts */
  866. ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
  867. ret = ufshcd_dme_link_startup(hba);
  868. if (ret)
  869. goto out;
  870. ret = ufshcd_make_hba_operational(hba);
  871. out:
  872. if (ret)
  873. dev_err(hba->dev, "link startup failed %d\n", ret);
  874. return ret;
  875. }
  876. /**
  877. * ufshcd_do_reset - reset the host controller
  878. * @hba: per adapter instance
  879. *
  880. * Returns SUCCESS/FAILED
  881. */
  882. static int ufshcd_do_reset(struct ufs_hba *hba)
  883. {
  884. struct ufshcd_lrb *lrbp;
  885. unsigned long flags;
  886. int tag;
  887. /* block commands from midlayer */
  888. scsi_block_requests(hba->host);
  889. spin_lock_irqsave(hba->host->host_lock, flags);
  890. hba->ufshcd_state = UFSHCD_STATE_RESET;
  891. /* send controller to reset state */
  892. ufshcd_hba_stop(hba);
  893. spin_unlock_irqrestore(hba->host->host_lock, flags);
  894. /* abort outstanding commands */
  895. for (tag = 0; tag < hba->nutrs; tag++) {
  896. if (test_bit(tag, &hba->outstanding_reqs)) {
  897. lrbp = &hba->lrb[tag];
  898. scsi_dma_unmap(lrbp->cmd);
  899. lrbp->cmd->result = DID_RESET << 16;
  900. lrbp->cmd->scsi_done(lrbp->cmd);
  901. lrbp->cmd = NULL;
  902. }
  903. }
  904. /* clear outstanding request/task bit maps */
  905. hba->outstanding_reqs = 0;
  906. hba->outstanding_tasks = 0;
  907. /* Host controller enable */
  908. if (ufshcd_hba_enable(hba)) {
  909. dev_err(hba->dev,
  910. "Reset: Controller initialization failed\n");
  911. return FAILED;
  912. }
  913. if (ufshcd_link_startup(hba)) {
  914. dev_err(hba->dev,
  915. "Reset: Link start-up failed\n");
  916. return FAILED;
  917. }
  918. return SUCCESS;
  919. }
  920. /**
  921. * ufshcd_slave_alloc - handle initial SCSI device configurations
  922. * @sdev: pointer to SCSI device
  923. *
  924. * Returns success
  925. */
  926. static int ufshcd_slave_alloc(struct scsi_device *sdev)
  927. {
  928. struct ufs_hba *hba;
  929. hba = shost_priv(sdev->host);
  930. sdev->tagged_supported = 1;
  931. /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
  932. sdev->use_10_for_ms = 1;
  933. scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
  934. /*
  935. * Inform SCSI Midlayer that the LUN queue depth is same as the
  936. * controller queue depth. If a LUN queue depth is less than the
  937. * controller queue depth and if the LUN reports
  938. * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
  939. * with scsi_adjust_queue_depth.
  940. */
  941. scsi_activate_tcq(sdev, hba->nutrs);
  942. return 0;
  943. }
  944. /**
  945. * ufshcd_slave_destroy - remove SCSI device configurations
  946. * @sdev: pointer to SCSI device
  947. */
  948. static void ufshcd_slave_destroy(struct scsi_device *sdev)
  949. {
  950. struct ufs_hba *hba;
  951. hba = shost_priv(sdev->host);
  952. scsi_deactivate_tcq(sdev, hba->nutrs);
  953. }
  954. /**
  955. * ufshcd_task_req_compl - handle task management request completion
  956. * @hba: per adapter instance
  957. * @index: index of the completed request
  958. *
  959. * Returns SUCCESS/FAILED
  960. */
  961. static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
  962. {
  963. struct utp_task_req_desc *task_req_descp;
  964. struct utp_upiu_task_rsp *task_rsp_upiup;
  965. unsigned long flags;
  966. int ocs_value;
  967. int task_result;
  968. spin_lock_irqsave(hba->host->host_lock, flags);
  969. /* Clear completed tasks from outstanding_tasks */
  970. __clear_bit(index, &hba->outstanding_tasks);
  971. task_req_descp = hba->utmrdl_base_addr;
  972. ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
  973. if (ocs_value == OCS_SUCCESS) {
  974. task_rsp_upiup = (struct utp_upiu_task_rsp *)
  975. task_req_descp[index].task_rsp_upiu;
  976. task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
  977. task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
  978. if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
  979. task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
  980. task_result = FAILED;
  981. else
  982. task_result = SUCCESS;
  983. } else {
  984. task_result = FAILED;
  985. dev_err(hba->dev,
  986. "trc: Invalid ocs = %x\n", ocs_value);
  987. }
  988. spin_unlock_irqrestore(hba->host->host_lock, flags);
  989. return task_result;
  990. }
  991. /**
  992. * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
  993. * SAM_STAT_TASK_SET_FULL SCSI command status.
  994. * @cmd: pointer to SCSI command
  995. */
  996. static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
  997. {
  998. struct ufs_hba *hba;
  999. int i;
  1000. int lun_qdepth = 0;
  1001. hba = shost_priv(cmd->device->host);
  1002. /*
  1003. * LUN queue depth can be obtained by counting outstanding commands
  1004. * on the LUN.
  1005. */
  1006. for (i = 0; i < hba->nutrs; i++) {
  1007. if (test_bit(i, &hba->outstanding_reqs)) {
  1008. /*
  1009. * Check if the outstanding command belongs
  1010. * to the LUN which reported SAM_STAT_TASK_SET_FULL.
  1011. */
  1012. if (cmd->device->lun == hba->lrb[i].lun)
  1013. lun_qdepth++;
  1014. }
  1015. }
  1016. /*
  1017. * LUN queue depth will be total outstanding commands, except the
  1018. * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
  1019. */
  1020. scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
  1021. }
  1022. /**
  1023. * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
  1024. * @lrb: pointer to local reference block of completed command
  1025. * @scsi_status: SCSI command status
  1026. *
  1027. * Returns value base on SCSI command status
  1028. */
  1029. static inline int
  1030. ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
  1031. {
  1032. int result = 0;
  1033. switch (scsi_status) {
  1034. case SAM_STAT_GOOD:
  1035. result |= DID_OK << 16 |
  1036. COMMAND_COMPLETE << 8 |
  1037. SAM_STAT_GOOD;
  1038. break;
  1039. case SAM_STAT_CHECK_CONDITION:
  1040. result |= DID_OK << 16 |
  1041. COMMAND_COMPLETE << 8 |
  1042. SAM_STAT_CHECK_CONDITION;
  1043. ufshcd_copy_sense_data(lrbp);
  1044. break;
  1045. case SAM_STAT_BUSY:
  1046. result |= SAM_STAT_BUSY;
  1047. break;
  1048. case SAM_STAT_TASK_SET_FULL:
  1049. /*
  1050. * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
  1051. * depth needs to be adjusted to the exact number of
  1052. * outstanding commands the LUN can handle at any given time.
  1053. */
  1054. ufshcd_adjust_lun_qdepth(lrbp->cmd);
  1055. result |= SAM_STAT_TASK_SET_FULL;
  1056. break;
  1057. case SAM_STAT_TASK_ABORTED:
  1058. result |= SAM_STAT_TASK_ABORTED;
  1059. break;
  1060. default:
  1061. result |= DID_ERROR << 16;
  1062. break;
  1063. } /* end of switch */
  1064. return result;
  1065. }
  1066. /**
  1067. * ufshcd_transfer_rsp_status - Get overall status of the response
  1068. * @hba: per adapter instance
  1069. * @lrb: pointer to local reference block of completed command
  1070. *
  1071. * Returns result of the command to notify SCSI midlayer
  1072. */
  1073. static inline int
  1074. ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1075. {
  1076. int result = 0;
  1077. int scsi_status;
  1078. int ocs;
  1079. /* overall command status of utrd */
  1080. ocs = ufshcd_get_tr_ocs(lrbp);
  1081. switch (ocs) {
  1082. case OCS_SUCCESS:
  1083. /* check if the returned transfer response is valid */
  1084. result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
  1085. if (result) {
  1086. dev_err(hba->dev,
  1087. "Invalid response = %x\n", result);
  1088. break;
  1089. }
  1090. /*
  1091. * get the response UPIU result to extract
  1092. * the SCSI command status
  1093. */
  1094. result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
  1095. /*
  1096. * get the result based on SCSI status response
  1097. * to notify the SCSI midlayer of the command status
  1098. */
  1099. scsi_status = result & MASK_SCSI_STATUS;
  1100. result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  1101. break;
  1102. case OCS_ABORTED:
  1103. result |= DID_ABORT << 16;
  1104. break;
  1105. case OCS_INVALID_CMD_TABLE_ATTR:
  1106. case OCS_INVALID_PRDT_ATTR:
  1107. case OCS_MISMATCH_DATA_BUF_SIZE:
  1108. case OCS_MISMATCH_RESP_UPIU_SIZE:
  1109. case OCS_PEER_COMM_FAILURE:
  1110. case OCS_FATAL_ERROR:
  1111. default:
  1112. result |= DID_ERROR << 16;
  1113. dev_err(hba->dev,
  1114. "OCS error from controller = %x\n", ocs);
  1115. break;
  1116. } /* end of switch */
  1117. return result;
  1118. }
  1119. /**
  1120. * ufshcd_uic_cmd_compl - handle completion of uic command
  1121. * @hba: per adapter instance
  1122. */
  1123. static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
  1124. {
  1125. if (hba->active_uic_cmd) {
  1126. hba->active_uic_cmd->argument2 |=
  1127. ufshcd_get_uic_cmd_result(hba);
  1128. complete(&hba->active_uic_cmd->done);
  1129. }
  1130. }
  1131. /**
  1132. * ufshcd_transfer_req_compl - handle SCSI and query command completion
  1133. * @hba: per adapter instance
  1134. */
  1135. static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
  1136. {
  1137. struct ufshcd_lrb *lrb;
  1138. unsigned long completed_reqs;
  1139. u32 tr_doorbell;
  1140. int result;
  1141. int index;
  1142. lrb = hba->lrb;
  1143. tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  1144. completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
  1145. for (index = 0; index < hba->nutrs; index++) {
  1146. if (test_bit(index, &completed_reqs)) {
  1147. result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
  1148. if (lrb[index].cmd) {
  1149. scsi_dma_unmap(lrb[index].cmd);
  1150. lrb[index].cmd->result = result;
  1151. lrb[index].cmd->scsi_done(lrb[index].cmd);
  1152. /* Mark completed command as NULL in LRB */
  1153. lrb[index].cmd = NULL;
  1154. }
  1155. } /* end of if */
  1156. } /* end of for */
  1157. /* clear corresponding bits of completed commands */
  1158. hba->outstanding_reqs ^= completed_reqs;
  1159. /* Reset interrupt aggregation counters */
  1160. ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
  1161. }
  1162. /**
  1163. * ufshcd_fatal_err_handler - handle fatal errors
  1164. * @hba: per adapter instance
  1165. */
  1166. static void ufshcd_fatal_err_handler(struct work_struct *work)
  1167. {
  1168. struct ufs_hba *hba;
  1169. hba = container_of(work, struct ufs_hba, feh_workq);
  1170. /* check if reset is already in progress */
  1171. if (hba->ufshcd_state != UFSHCD_STATE_RESET)
  1172. ufshcd_do_reset(hba);
  1173. }
  1174. /**
  1175. * ufshcd_err_handler - Check for fatal errors
  1176. * @work: pointer to a work queue structure
  1177. */
  1178. static void ufshcd_err_handler(struct ufs_hba *hba)
  1179. {
  1180. u32 reg;
  1181. if (hba->errors & INT_FATAL_ERRORS)
  1182. goto fatal_eh;
  1183. if (hba->errors & UIC_ERROR) {
  1184. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
  1185. if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
  1186. goto fatal_eh;
  1187. }
  1188. return;
  1189. fatal_eh:
  1190. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  1191. schedule_work(&hba->feh_workq);
  1192. }
  1193. /**
  1194. * ufshcd_tmc_handler - handle task management function completion
  1195. * @hba: per adapter instance
  1196. */
  1197. static void ufshcd_tmc_handler(struct ufs_hba *hba)
  1198. {
  1199. u32 tm_doorbell;
  1200. tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
  1201. hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
  1202. wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
  1203. }
  1204. /**
  1205. * ufshcd_sl_intr - Interrupt service routine
  1206. * @hba: per adapter instance
  1207. * @intr_status: contains interrupts generated by the controller
  1208. */
  1209. static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  1210. {
  1211. hba->errors = UFSHCD_ERROR_MASK & intr_status;
  1212. if (hba->errors)
  1213. ufshcd_err_handler(hba);
  1214. if (intr_status & UIC_COMMAND_COMPL)
  1215. ufshcd_uic_cmd_compl(hba);
  1216. if (intr_status & UTP_TASK_REQ_COMPL)
  1217. ufshcd_tmc_handler(hba);
  1218. if (intr_status & UTP_TRANSFER_REQ_COMPL)
  1219. ufshcd_transfer_req_compl(hba);
  1220. }
  1221. /**
  1222. * ufshcd_intr - Main interrupt service routine
  1223. * @irq: irq number
  1224. * @__hba: pointer to adapter instance
  1225. *
  1226. * Returns IRQ_HANDLED - If interrupt is valid
  1227. * IRQ_NONE - If invalid interrupt
  1228. */
  1229. static irqreturn_t ufshcd_intr(int irq, void *__hba)
  1230. {
  1231. u32 intr_status;
  1232. irqreturn_t retval = IRQ_NONE;
  1233. struct ufs_hba *hba = __hba;
  1234. spin_lock(hba->host->host_lock);
  1235. intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
  1236. if (intr_status) {
  1237. ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
  1238. ufshcd_sl_intr(hba, intr_status);
  1239. retval = IRQ_HANDLED;
  1240. }
  1241. spin_unlock(hba->host->host_lock);
  1242. return retval;
  1243. }
  1244. /**
  1245. * ufshcd_issue_tm_cmd - issues task management commands to controller
  1246. * @hba: per adapter instance
  1247. * @lrbp: pointer to local reference block
  1248. *
  1249. * Returns SUCCESS/FAILED
  1250. */
  1251. static int
  1252. ufshcd_issue_tm_cmd(struct ufs_hba *hba,
  1253. struct ufshcd_lrb *lrbp,
  1254. u8 tm_function)
  1255. {
  1256. struct utp_task_req_desc *task_req_descp;
  1257. struct utp_upiu_task_req *task_req_upiup;
  1258. struct Scsi_Host *host;
  1259. unsigned long flags;
  1260. int free_slot = 0;
  1261. int err;
  1262. host = hba->host;
  1263. spin_lock_irqsave(host->host_lock, flags);
  1264. /* If task management queue is full */
  1265. free_slot = ufshcd_get_tm_free_slot(hba);
  1266. if (free_slot >= hba->nutmrs) {
  1267. spin_unlock_irqrestore(host->host_lock, flags);
  1268. dev_err(hba->dev, "Task management queue full\n");
  1269. err = FAILED;
  1270. goto out;
  1271. }
  1272. task_req_descp = hba->utmrdl_base_addr;
  1273. task_req_descp += free_slot;
  1274. /* Configure task request descriptor */
  1275. task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
  1276. task_req_descp->header.dword_2 =
  1277. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  1278. /* Configure task request UPIU */
  1279. task_req_upiup =
  1280. (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
  1281. task_req_upiup->header.dword_0 =
  1282. cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
  1283. lrbp->lun, lrbp->task_tag));
  1284. task_req_upiup->header.dword_1 =
  1285. cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
  1286. task_req_upiup->input_param1 = lrbp->lun;
  1287. task_req_upiup->input_param1 =
  1288. cpu_to_be32(task_req_upiup->input_param1);
  1289. task_req_upiup->input_param2 = lrbp->task_tag;
  1290. task_req_upiup->input_param2 =
  1291. cpu_to_be32(task_req_upiup->input_param2);
  1292. /* send command to the controller */
  1293. __set_bit(free_slot, &hba->outstanding_tasks);
  1294. ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
  1295. spin_unlock_irqrestore(host->host_lock, flags);
  1296. /* wait until the task management command is completed */
  1297. err =
  1298. wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
  1299. (test_bit(free_slot,
  1300. &hba->tm_condition) != 0),
  1301. 60 * HZ);
  1302. if (!err) {
  1303. dev_err(hba->dev,
  1304. "Task management command timed-out\n");
  1305. err = FAILED;
  1306. goto out;
  1307. }
  1308. clear_bit(free_slot, &hba->tm_condition);
  1309. err = ufshcd_task_req_compl(hba, free_slot);
  1310. out:
  1311. return err;
  1312. }
  1313. /**
  1314. * ufshcd_device_reset - reset device and abort all the pending commands
  1315. * @cmd: SCSI command pointer
  1316. *
  1317. * Returns SUCCESS/FAILED
  1318. */
  1319. static int ufshcd_device_reset(struct scsi_cmnd *cmd)
  1320. {
  1321. struct Scsi_Host *host;
  1322. struct ufs_hba *hba;
  1323. unsigned int tag;
  1324. u32 pos;
  1325. int err;
  1326. host = cmd->device->host;
  1327. hba = shost_priv(host);
  1328. tag = cmd->request->tag;
  1329. err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
  1330. if (err == FAILED)
  1331. goto out;
  1332. for (pos = 0; pos < hba->nutrs; pos++) {
  1333. if (test_bit(pos, &hba->outstanding_reqs) &&
  1334. (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
  1335. /* clear the respective UTRLCLR register bit */
  1336. ufshcd_utrl_clear(hba, pos);
  1337. clear_bit(pos, &hba->outstanding_reqs);
  1338. if (hba->lrb[pos].cmd) {
  1339. scsi_dma_unmap(hba->lrb[pos].cmd);
  1340. hba->lrb[pos].cmd->result =
  1341. DID_ABORT << 16;
  1342. hba->lrb[pos].cmd->scsi_done(cmd);
  1343. hba->lrb[pos].cmd = NULL;
  1344. }
  1345. }
  1346. } /* end of for */
  1347. out:
  1348. return err;
  1349. }
  1350. /**
  1351. * ufshcd_host_reset - Main reset function registered with scsi layer
  1352. * @cmd: SCSI command pointer
  1353. *
  1354. * Returns SUCCESS/FAILED
  1355. */
  1356. static int ufshcd_host_reset(struct scsi_cmnd *cmd)
  1357. {
  1358. struct ufs_hba *hba;
  1359. hba = shost_priv(cmd->device->host);
  1360. if (hba->ufshcd_state == UFSHCD_STATE_RESET)
  1361. return SUCCESS;
  1362. return ufshcd_do_reset(hba);
  1363. }
  1364. /**
  1365. * ufshcd_abort - abort a specific command
  1366. * @cmd: SCSI command pointer
  1367. *
  1368. * Returns SUCCESS/FAILED
  1369. */
  1370. static int ufshcd_abort(struct scsi_cmnd *cmd)
  1371. {
  1372. struct Scsi_Host *host;
  1373. struct ufs_hba *hba;
  1374. unsigned long flags;
  1375. unsigned int tag;
  1376. int err;
  1377. host = cmd->device->host;
  1378. hba = shost_priv(host);
  1379. tag = cmd->request->tag;
  1380. spin_lock_irqsave(host->host_lock, flags);
  1381. /* check if command is still pending */
  1382. if (!(test_bit(tag, &hba->outstanding_reqs))) {
  1383. err = FAILED;
  1384. spin_unlock_irqrestore(host->host_lock, flags);
  1385. goto out;
  1386. }
  1387. spin_unlock_irqrestore(host->host_lock, flags);
  1388. err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
  1389. if (err == FAILED)
  1390. goto out;
  1391. scsi_dma_unmap(cmd);
  1392. spin_lock_irqsave(host->host_lock, flags);
  1393. /* clear the respective UTRLCLR register bit */
  1394. ufshcd_utrl_clear(hba, tag);
  1395. __clear_bit(tag, &hba->outstanding_reqs);
  1396. hba->lrb[tag].cmd = NULL;
  1397. spin_unlock_irqrestore(host->host_lock, flags);
  1398. out:
  1399. return err;
  1400. }
  1401. /**
  1402. * ufshcd_async_scan - asynchronous execution for link startup
  1403. * @data: data pointer to pass to this function
  1404. * @cookie: cookie data
  1405. */
  1406. static void ufshcd_async_scan(void *data, async_cookie_t cookie)
  1407. {
  1408. struct ufs_hba *hba = (struct ufs_hba *)data;
  1409. int ret;
  1410. ret = ufshcd_link_startup(hba);
  1411. if (!ret)
  1412. scsi_scan_host(hba->host);
  1413. }
/* SCSI host template registered with the midlayer via scsi_host_alloc() */
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_destroy		= ufshcd_slave_destroy,
	/* error-handling escalation chain: abort -> device -> host reset */
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_device_reset,
	.eh_host_reset_handler	= ufshcd_host_reset,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	/* can_queue/cmd_per_lun are overridden in ufshcd_init() from nutrs */
	.can_queue		= UFSHCD_CAN_QUEUE,
};
  1429. /**
  1430. * ufshcd_suspend - suspend power management function
  1431. * @hba: per adapter instance
  1432. * @state: power state
  1433. *
  1434. * Returns -ENOSYS
  1435. */
  1436. int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
  1437. {
  1438. /*
  1439. * TODO:
  1440. * 1. Block SCSI requests from SCSI midlayer
  1441. * 2. Change the internal driver state to non operational
  1442. * 3. Set UTRLRSR and UTMRLRSR bits to zero
  1443. * 4. Wait until outstanding commands are completed
  1444. * 5. Set HCE to zero to send the UFS host controller to reset state
  1445. */
  1446. return -ENOSYS;
  1447. }
  1448. EXPORT_SYMBOL_GPL(ufshcd_suspend);
  1449. /**
  1450. * ufshcd_resume - resume power management function
  1451. * @hba: per adapter instance
  1452. *
  1453. * Returns -ENOSYS
  1454. */
  1455. int ufshcd_resume(struct ufs_hba *hba)
  1456. {
  1457. /*
  1458. * TODO:
  1459. * 1. Set HCE to 1, to start the UFS host controller
  1460. * initialization process
  1461. * 2. Set UTRLRSR and UTMRLRSR bits to 1
  1462. * 3. Change the internal driver state to operational
  1463. * 4. Unblock SCSI requests from SCSI midlayer
  1464. */
  1465. return -ENOSYS;
  1466. }
  1467. EXPORT_SYMBOL_GPL(ufshcd_resume);
  1468. /**
  1469. * ufshcd_hba_free - free allocated memory for
  1470. * host memory space data structures
  1471. * @hba: per adapter instance
  1472. */
  1473. static void ufshcd_hba_free(struct ufs_hba *hba)
  1474. {
  1475. iounmap(hba->mmio_base);
  1476. ufshcd_free_hba_memory(hba);
  1477. }
  1478. /**
  1479. * ufshcd_remove - de-allocate SCSI host and host memory space
  1480. * data structure memory
  1481. * @hba - per adapter instance
  1482. */
  1483. void ufshcd_remove(struct ufs_hba *hba)
  1484. {
  1485. /* disable interrupts */
  1486. ufshcd_disable_intr(hba, hba->intr_mask);
  1487. ufshcd_hba_stop(hba);
  1488. ufshcd_hba_free(hba);
  1489. scsi_remove_host(hba->host);
  1490. scsi_host_put(hba->host);
  1491. }
  1492. EXPORT_SYMBOL_GPL(ufshcd_remove);
  1493. /**
  1494. * ufshcd_init - Driver initialization routine
  1495. * @dev: pointer to device handle
  1496. * @hba_handle: driver private handle
  1497. * @mmio_base: base register address
  1498. * @irq: Interrupt line of device
  1499. * Returns 0 on success, non-zero value on failure
  1500. */
  1501. int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
  1502. void __iomem *mmio_base, unsigned int irq)
  1503. {
  1504. struct Scsi_Host *host;
  1505. struct ufs_hba *hba;
  1506. int err;
  1507. if (!dev) {
  1508. dev_err(dev,
  1509. "Invalid memory reference for dev is NULL\n");
  1510. err = -ENODEV;
  1511. goto out_error;
  1512. }
  1513. if (!mmio_base) {
  1514. dev_err(dev,
  1515. "Invalid memory reference for mmio_base is NULL\n");
  1516. err = -ENODEV;
  1517. goto out_error;
  1518. }
  1519. host = scsi_host_alloc(&ufshcd_driver_template,
  1520. sizeof(struct ufs_hba));
  1521. if (!host) {
  1522. dev_err(dev, "scsi_host_alloc failed\n");
  1523. err = -ENOMEM;
  1524. goto out_error;
  1525. }
  1526. hba = shost_priv(host);
  1527. hba->host = host;
  1528. hba->dev = dev;
  1529. hba->mmio_base = mmio_base;
  1530. hba->irq = irq;
  1531. /* Read capabilities registers */
  1532. ufshcd_hba_capabilities(hba);
  1533. /* Get UFS version supported by the controller */
  1534. hba->ufs_version = ufshcd_get_ufs_version(hba);
  1535. /* Get Interrupt bit mask per version */
  1536. hba->intr_mask = ufshcd_get_intr_mask(hba);
  1537. /* Allocate memory for host memory space */
  1538. err = ufshcd_memory_alloc(hba);
  1539. if (err) {
  1540. dev_err(hba->dev, "Memory allocation failed\n");
  1541. goto out_disable;
  1542. }
  1543. /* Configure LRB */
  1544. ufshcd_host_memory_configure(hba);
  1545. host->can_queue = hba->nutrs;
  1546. host->cmd_per_lun = hba->nutrs;
  1547. host->max_id = UFSHCD_MAX_ID;
  1548. host->max_lun = UFSHCD_MAX_LUNS;
  1549. host->max_channel = UFSHCD_MAX_CHANNEL;
  1550. host->unique_id = host->host_no;
  1551. host->max_cmd_len = MAX_CDB_SIZE;
  1552. /* Initailize wait queue for task management */
  1553. init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
  1554. /* Initialize work queues */
  1555. INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
  1556. /* Initialize UIC command mutex */
  1557. mutex_init(&hba->uic_cmd_mutex);
  1558. /* IRQ registration */
  1559. err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
  1560. if (err) {
  1561. dev_err(hba->dev, "request irq failed\n");
  1562. goto out_lrb_free;
  1563. }
  1564. /* Enable SCSI tag mapping */
  1565. err = scsi_init_shared_tag_map(host, host->can_queue);
  1566. if (err) {
  1567. dev_err(hba->dev, "init shared queue failed\n");
  1568. goto out_free_irq;
  1569. }
  1570. err = scsi_add_host(host, hba->dev);
  1571. if (err) {
  1572. dev_err(hba->dev, "scsi_add_host failed\n");
  1573. goto out_free_irq;
  1574. }
  1575. /* Host controller enable */
  1576. err = ufshcd_hba_enable(hba);
  1577. if (err) {
  1578. dev_err(hba->dev, "Host controller enable failed\n");
  1579. goto out_remove_scsi_host;
  1580. }
  1581. *hba_handle = hba;
  1582. async_schedule(ufshcd_async_scan, hba);
  1583. return 0;
  1584. out_remove_scsi_host:
  1585. scsi_remove_host(hba->host);
  1586. out_free_irq:
  1587. free_irq(irq, hba);
  1588. out_lrb_free:
  1589. ufshcd_free_hba_memory(hba);
  1590. out_disable:
  1591. scsi_host_put(host);
  1592. out_error:
  1593. return err;
  1594. }
  1595. EXPORT_SYMBOL_GPL(ufshcd_init);
  1596. MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
  1597. MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
  1598. MODULE_DESCRIPTION("Generic UFS host controller driver Core");
  1599. MODULE_LICENSE("GPL");
  1600. MODULE_VERSION(UFSHCD_DRIVER_VERSION);