/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
#include "scu_unsolicited_frame.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
/**
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power.  Ultimately, this
 * will be specified by an OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
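
/*
 * Illustration (editor's sketch, not driver code): the cycle bit encodes
 * which "lap" of the ring an entry belongs to.  The hardware flips the
 * value it writes into bit 31 of each entry on every pass through the
 * queue, and the cached get pointer carries a matching cycle bit that the
 * driver toggles on wrap.  A new entry is present exactly when the two
 * agree:
 *
 *	u32 get   = scic->completion_queue_get;
 *	u32 index = NORMALIZE_GET_POINTER(get);
 *	bool new_entry =
 *		NORMALIZE_GET_POINTER_CYCLE_BIT(get) ==
 *		COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[index]);
 *
 * This is the test scic_sds_controller_completion_queue_has_entries()
 * performs below; no separate producer/consumer count is needed.
 */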
/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	sm->initial_state_id = initial_state;
	sm->previous_state_id = initial_state;
	sm->current_state_id = initial_state;
	sm->state_table = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}
/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
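
/*
 * Usage sketch (editor's example; the state ids and handler below are
 * hypothetical, not part of this driver).  Entry/exit handlers are
 * optional; sci_change_state() runs the old state's exit_state handler,
 * then the new state's enter_state handler:
 *
 *	enum { EX_A, EX_B };
 *
 *	static void ex_enter_b(struct sci_base_state_machine *sm)
 *	{
 *		...	(runs on every transition into EX_B)
 *	}
 *
 *	static const struct sci_base_state ex_table[] = {
 *		[EX_A] = { },
 *		[EX_B] = { .enter_state = ex_enter_b },
 *	};
 *
 *	sci_init_sm(&sm, ex_table, EX_A);	(enters EX_A)
 *	sci_change_state(&sm, EX_B);		(exits EX_A, enters EX_B)
 */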
static bool scic_sds_controller_completion_queue_has_entries(
	struct scic_sds_controller *scic)
{
	u32 get_value = scic->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
		return true;

	return false;
}
static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
	if (scic_sds_controller_completion_queue_has_entries(scic)) {
		return true;
	} else {
		/*
		 * We have a spurious interrupt; it could be that we have
		 * already emptied the completion queue from a previous
		 * interrupt.
		 */
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not
		 * to be notified of an interrupt completion if we do not
		 * take this step.  We will mask then unmask the interrupts
		 * so that if another interrupt is pending after the clearing
		 * of the interrupt source we get the next interrupt message.
		 */
		writel(0xFF000000, &scic->smu_registers->interrupt_mask);
		writel(0, &scic->smu_registers->interrupt_mask);
	}

	return false;
}
irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_isr(&ihost->sci))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}
static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending, so let it through
		 * and handle it in the callback.
		 */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this step.
	 * We will mask then unmask the error interrupts so if there was
	 * another interrupt pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)?
	 */
	writel(0xff, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}
static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *sci_req;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);
	sci_req = scic->io_request_table[index];

	/* Make sure that we really want to process this IO request */
	if (sci_req && sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    ISCI_TAG_SEQ(sci_req->io_tag) == scic->io_request_sequence[index])
		/* Yep, this is a valid io request; pass it along to the
		 * io request handler
		 */
		scic_sds_io_request_tc_completion(sci_req, completion_entry);
}
static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_command_request_type(completion_entry)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		io_request = scic->io_request_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for io request "
			 "%p\n",
			 __func__,
			 completion_entry,
			 io_request);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;

	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		device = scic->device_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for remote "
			 "device %p\n",
			 __func__,
			 completion_entry,
			 device);
		/* @todo For a post RNC operation we need to fail the
		 * device
		 */
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion unknown SDMA completion "
			 "type %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}
static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
						  u32 completion_entry)
{
	u32 index;
	u32 frame_index;
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;
	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	frame_header = scic->uf_control.buffers.array[frame_index].header;
	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error,
		 * will this cause a problem?  We expect the phy
		 * initialization will fail if there is an error in the frame.
		 */
		scic_sds_controller_release_frame(scic, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		result = scic_sds_phy_frame_handler(phy, frame_index);
	} else {
		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct
			 * attached SATA device that has not yet been created.
			 * In either case forward the frame to the PE and let
			 * it take care of the frame data.
			 */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &ihost->phys[index].sci;
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			if (index < scic->remote_node_entries)
				device = scic->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(scic, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error
		 * message when we get this failure notification?
		 */
	}
}
static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we
		 * want to reset the controller.
		 */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 scic,
					 completion_entry);
			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 scic,
					 completion_entry);
			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * Direct the broadcast change event to the phy first and then
		 * let the phy redirect the broadcast change to the port object.
		 */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * Direct the error counter event to the phy object since that
		 * is where we get the event notification.  This is a type 4
		 * event.
		 */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				scic,
				completion_entry,
				index);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}
static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_get;
	u32 event_cycle;

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

	event_get = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])
		) {
		completion_count++;

		completion_entry = scic->completion_queue[get_index];

		/* increment the get pointer and check for rollover to toggle the cycle bit */
		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
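		/*
		 * Editor's note on the update above: the queue depth is a
		 * power of two, so ((get_index + 1) &
		 * SCU_MAX_COMPLETION_QUEUE_ENTRIES) is non-zero only on the
		 * increment that wraps the ring; shifted up into the cycle
		 * bit position it toggles the cached cycle bit exactly once
		 * per lap.  For a hypothetical 128-entry queue: index 127
		 * gives (128 & 128) = 128, every other increment gives 0.
		 */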
		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

		dev_dbg(scic_to_dev(scic),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
		case SCU_COMPLETION_TYPE_NOTIFY: {
			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

			scic_sds_controller_event_completion(scic, completion_entry);
			break;
		}
		default:
			dev_warn(scic_to_dev(scic),
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		scic->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(scic->completion_queue_get,
		       &scic->smu_registers->completion_queue_get);
	}

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		scic->completion_queue_get);
}
static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    scic_sds_controller_completion_queue_has_entries(scic)) {

		scic_sds_controller_process_completions(scic);
		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
	} else {
		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&scic->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to
	 * do this.  We are in the middle of a hardware fault and should
	 * probably be reset.
	 */
	writel(0, &scic->smu_registers->interrupt_mask);
}
irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = &ihost->sci;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_error_isr(&ihost->sci))
		scic_sds_controller_error_handler(&ihost->sci);

	return IRQ_HANDLED;
}
/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}
int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}
/**
 * scic_controller_get_suggested_start_timeout() - This method returns the
 *    suggested scic_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 scic_controller_get_suggested_start_timeout(
	struct scic_sds_controller *sc)
{
	/* Validate the user supplied parameters. */
	if (sc == NULL)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval.
	 */
	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
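
/*
 * Worked example (editor's note, not driver code): assuming SCI_MAX_PHYS
 * is 4, the suggested timeout evaluates to
 *
 *	SCIC_SDS_SIGNATURE_FIS_TIMEOUT
 *	+ 100			(phy start timeout, ms)
 *	+ (4 - 1) * 500		(three power control intervals, ms)
 *
 * i.e. the signature FIS timeout plus 1600 ms of phy start and spin-up
 * budget.
 */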
static void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0, &scic->smu_registers->interrupt_mask);
}

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}
static void scic_sds_controller_enable_port_task_scheduler(
	struct scic_sds_controller *scic)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&scic->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &scic->scu_registers->peg0.ptsg.control);
}
static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */
	task_assignment =
		readl(&scic->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &scic->smu_registers->task_context_assignment[0]);
}
static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	scic->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &scic->smu_registers->completion_queue_control);

	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &scic->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &scic->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid
		 */
		scic->completion_queue[index] = 0x80000000;
	}
}
static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}
/**
 * scic_sds_controller_transition_to_ready() - This method will attempt to
 *    transition the controller into the ready state and indicate that the
 *    controller start operation has completed if all criteria are met.
 * @scic: This parameter indicates the controller object for which
 *    to transition to ready.
 * @status: This parameter indicates the status value to be passed into the
 *    call to scic_cb_controller_start_complete().
 *
 * none.
 */
static void scic_sds_controller_transition_to_ready(
	struct scic_sds_controller *scic,
	enum sci_status status)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (scic->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&scic->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}
static bool is_phy_starting(struct scic_sds_phy *sci_phy)
{
	enum scic_sds_phy_states state;

	state = sci_phy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}
/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	struct scic_sds_phy *sci_phy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (scic->phy_startup_timer_pending)
		return status;

	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			sci_phy = &ihost->phys[index].sci;
			state = sci_phy->sm.current_state_id;

			if (!phy_get_non_dummy_port(sci_phy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - for each link there is either no indication of a
			 *   connected device, or the connected device has
			 *   finished the link training process.
			 */
			if ((sci_phy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (sci_phy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (sci_phy->is_in_link_training == true && is_phy_starting(sci_phy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state.
		 */
		if (is_controller_start_complete == true) {
			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
			sci_del_timer(&scic->phy_timer);
			scic->phy_startup_timer_pending = false;
		}
	} else {
		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(sci_phy) == NULL) {
				scic->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC
				 * mode, so start the next phy in sequence.
				 * This phy will never go link up and will not
				 * draw power; the OEM parameters either
				 * configured the phy incorrectly for the PORT
				 * or it was never assigned to a PORT.
				 */
				return scic_sds_controller_start_next_phy(scic);
			}
		}

		status = scic_sds_phy_start(sci_phy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&scic->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			scic->phy_startup_timer_pending = true;
		} else {
			dev_warn(scic_to_dev(scic),
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
				 status);
		}

		scic->next_phy_to_start++;
	}

	return status;
}
static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), phy_timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	scic->phy_startup_timer_pending = false;

	do {
		status = scic_sds_controller_start_next_phy(scic);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
static void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
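
/*
 * Editor's note on the tci pool above: tci_head is the allocation side
 * and tci_tail the free side of a power-of-two ring of size
 * SCI_MAX_IO_REQUESTS; both indices are masked only on use, so the
 * CIRC_CNT()/CIRC_SPACE() helpers from <linux/circ_buf.h> report how
 * many tags are outstanding and how many remain.  A hedged usage sketch
 * (the helpers do no locking of their own; callers appear to rely on
 * external serialization such as ihost->scic_lock):
 *
 *	u16 tag = isci_tci_alloc(ihost);	(take a free tag)
 *	...					(tag owned by an I/O)
 *	isci_tci_free(ihost, tag);		(return it to the pool)
 */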
static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
					     u32 timeout)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result;
	u16 index;

	if (scic->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < scic->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	scic_sds_remote_node_table_initialize(
			&scic->available_remote_nodes,
			scic->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	scic_controller_disable_interrupts(scic);

	/* Enable the port task scheduler */
	scic_sds_controller_enable_port_task_scheduler(scic);

	/* Assign all the task entries to scic physical function */
	scic_sds_controller_assign_task_entries(scic);

	/* Now initialize the completion queue */
	scic_sds_controller_initialize_completion_queue(scic);

	/* Initialize the unsolicited frame queue for use */
	scic_sds_controller_initialize_unsolicited_frame_queue(scic);

	/* Start all of the ports on this controller */
	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		result = scic_sds_port_start(sci_port);
		if (result)
			return result;
	}

	scic_sds_controller_start_next_phy(scic);

	sci_mod_timer(&scic->timer, timeout);

	sci_change_state(&scic->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}
void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(&ihost->sci, tmo);
	scic_controller_enable_interrupts(&ihost->sci);
	spin_unlock_irq(&ihost->scic_lock);
}
static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(&ihost->sci);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}
static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}
/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler.  It's
 *    scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(&isci_host->sci);

	/* Take the lists of completed I/Os from the host. */
	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object. */
			isci_request_free(isci_host, request);
		}
	}
}
/**
 * scic_controller_stop() - This method will stop an individual controller
 *    object.  This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: the method return status is SCI_SUCCESS, and the
 *    controller has been quiesced.  This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation
 *    by the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state.  Indicate if the
 * controller stop method succeeded or failed in some way.  SCI_SUCCESS if the
 * stop operation successfully began.  SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state.  SCI_FAILURE_INVALID_STATE if
 * the controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
					    u32 timeout)
{
	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&scic->timer, timeout);
	sci_change_state(&scic->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}
/**
 * scic_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation
 *    is considered destructive.  In other words, all current operations are
 *    wiped out.  No IO completions for outstanding devices occur.
 *    Outstanding IO requests are not aborted or completed at the actual
 *    remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started.  SCI_FATAL_ERROR
 * if the controller reset operation is unable to complete.
 */
static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
{
	switch (scic->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPED:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&scic->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}
void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			isci_remote_device_change_state(idev, isci_stopping);
			isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	scic_controller_reset(&ihost->sci);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->sci.logical_port_entries; i++) {
		struct scic_sds_port *sci_port = &ihost->ports[i].sci;
		del_timer_sync(&sci_port->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct scic_sds_phy *sci_phy = &ihost->phys[i].sci;
		del_timer_sync(&sci_phy->sata_timer.timer);
	}

	del_timer_sync(&ihost->sci.port_agent.timer.timer);

	del_timer_sync(&ihost->sci.power_control.timer.timer);

	del_timer_sync(&ihost->sci.timer.timer);

	del_timer_sync(&ihost->sci.phy_timer.timer);
}
static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
static void isci_user_parameters_get(
		struct isci_host *isci_host,
		union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}
static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_change_state(&scic->sm, SCIC_RESET);
}

static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_del_timer(&scic->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
/**
 * scic_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceeds
 *    this number, an interrupt will be generated. The valid range of the
 *    input is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status scic_controller_set_interrupt_coalescence(
		struct scic_sds_controller *scic_controller,
		u32 coalesce_number,
		u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 * Defined encoding for interrupt coalescing timeout:
	 *   Value   Min      Max      Units
	 *   -----   ---      ---      -----
	 *   0       -        -        Disabled
	 *   1       13.3     20.0     ns
	 *   2       26.7     40.0
	 *   3       53.3     80.0
	 *   4       106.7    160.0
	 *   5       213.3    320.0
	 *   6       426.7    640.0
	 *   7       853.3    1280.0
	 *   8       1.7      2.6      us
	 *   9       3.4      5.1
	 *   10      6.8      10.2
	 *   11      13.7     20.5
	 *   12      27.3     41.0
	 *   13      54.6     81.9
	 *   14      109.2    163.8
	 *   15      218.5    327.7
	 *   16      436.9    655.4
	 *   17      873.8    1310.7
	 *   18      1.7      2.6      ms
	 *   19      3.5      5.2
	 *   20      7.0      10.5
	 *   21      14.0     21.0
	 *   22      28.0     41.9
	 *   23      55.9     83.9
	 *   24      111.8    167.8
	 *   25      223.7    335.5
	 *   26      447.4    671.1
	 *   27      894.8    1342.2
	 *   28      1.8      2.7      s
	 *   Others  Undefined
	 */

	/*
	 * Use the table above to decide the encoding of the interrupt
	 * coalescing timeout value for register writing.
	 */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* Convert the timeout value into units of 10 ns. */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* Get the encoding of the timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &scic_controller->smu_registers->interrupt_coalesce_control);

	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
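
/*
 * Worked example (illustrative note, not part of the original source): the
 * default applied below in scic_sds_controller_ready_state_enter() is a
 * coalesce_timeout of 250 us. That converts to 25000 in 10-ns units; the
 * search loop starts from the [85, 128) base range for encoding 7 and
 * doubles both bounds each iteration, so after eight doublings it reaches
 * [21760, 32768), which contains 25000. The function therefore writes
 * timeout_encode = 15, matching the 218.5-327.7 us row in the table above.
 */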
static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	/* set the default interrupt coalescence number and timeout value. */
	scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
}

static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	/* disable interrupt coalescence. */
	scic_controller_set_interrupt_coalescence(scic, 0, 0);
}
static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;
	struct isci_host *ihost = scic_to_ihost(scic);

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].sci.phy_index, phy_status);
		}
	}

	return status;
}
static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);

	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		port_status = scic_sds_port_stop(sci_port);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 sci_port->logical_port_index,
				 port_status);
		}
	}

	return status;
}
static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < scic->remote_node_entries; index++) {
		if (scic->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = scic_remote_device_stop(scic->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(scic_to_dev(scic),
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 scic->device_table[index], device_status);
			}
		}
	}

	return status;
}
static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	/* Stop all of the components for this controller */
	scic_sds_controller_stop_phys(scic);
	scic_sds_controller_stop_ports(scic);
	scic_sds_controller_stop_devices(scic);
}

static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_del_timer(&scic->timer);
}
/**
 * scic_sds_controller_reset_hardware() -
 *
 * This method will reset the controller hardware.
 */
static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	scic_controller_disable_interrupts(scic);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &scic->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}
static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	scic_sds_controller_reset_hardware(scic);
	sci_change_state(&scic->sm, SCIC_RESET);
}

static const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state = scic_sds_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state = scic_sds_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCIC_STOPPED] = {},
	[SCIC_FAILED] = {}
};
static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
{
	/* these defaults are overridden by the platform / firmware */
	struct isci_host *ihost = scic_to_ihost(scic);
	u16 index;

	/* Default to APC mode. */
	scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to one device allowed to spin up at a time. */
	scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	scic->oem_parameters.sds1.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		scic->oem_parameters.sds1.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		scic->user_parameters.sds1.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
		scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
		scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF.
		 */
		scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
		scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	scic->user_parameters.sds1.stp_inactivity_timeout = 5;
	scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
	scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
	scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
	scic->user_parameters.sds1.no_outbound_task_timeout = 20;
}
static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct sci_base_state_machine *sm = &scic->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* @todo Now what do we want to do in this case? */
		dev_err(scic_to_dev(scic),
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/**
 * scic_controller_construct() - This method will attempt to construct a
 *    controller object utilizing the supplied parameter information.
 * @scic: This parameter specifies the controller to be constructed.
 * @scu_base: mapped base address of the scu registers
 * @smu_base: mapped base address of the smu registers
 *
 * Indicate if the controller was successfully constructed or if it failed in
 * some way. SCI_SUCCESS This value is returned if the controller was
 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
 * This value is returned if the controller does not support the supplied type.
 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
 * controller does not support the supplied initialization data version.
 */
static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
						 void __iomem *scu_base,
						 void __iomem *smu_base)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	u8 i;

	sci_init_sm(&scic->sm, scic_sds_controller_state_table, SCIC_INITIAL);

	scic->scu_registers = scu_base;
	scic->smu_registers = smu_base;

	scic_sds_port_configuration_agent_construct(&scic->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
	/* i == SCI_MAX_PORTS here; the extra entry is the dummy port */
	scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		scic_sds_phy_construct(&ihost->phys[i].sci,
				       &ihost->ports[SCI_MAX_PORTS].sci, i);
	}

	scic->invalid_phy_mask = 0;

	sci_init_timer(&scic->timer, controller_timeout);

	/* Initialize the User and OEM parameters to default values. */
	scic_sds_controller_set_default_config_parameters(scic);

	return scic_controller_reset(scic);
}
int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}
static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
					       union scic_oem_parameters *scic_parms)
{
	u32 state = scic->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {

		if (scic_oem_parameters_validate(&scic_parms->sds1))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		scic->oem_parameters.sds1 = scic_parms->sds1;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

void scic_oem_parameters_get(
		struct scic_sds_controller *scic,
		union scic_oem_parameters *scic_parms)
{
	memcpy(scic_parms, &scic->oem_parameters, sizeof(*scic_parms));
}
static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_phy *sci_phy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	scic->power_control.phys_granted_power = 0;

	if (scic->power_control.phys_waiting == 0) {
		scic->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (scic->power_control.phys_waiting == 0)
			break;

		sci_phy = scic->power_control.requesters[i];
		if (sci_phy == NULL)
			continue;

		if (scic->power_control.phys_granted_power >=
		    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up)
			break;

		scic->power_control.requesters[i] = NULL;
		scic->power_control.phys_waiting--;
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	scic->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/**
 * This method inserts the phy into the stagger spinup control queue.
 * @scic: the controller that owns the power control state.
 * @sci_phy: the phy requesting power for spin up.
 */
void scic_sds_controller_power_control_queue_insert(
		struct scic_sds_controller *scic,
		struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.phys_granted_power <
	    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);

		/*
		 * stop and start the power_control timer. When the timer fires,
		 * the no_of_phys_granted_power will be set to 0
		 */
		if (scic->power_control.timer_started)
			sci_del_timer(&scic->power_control.timer);

		sci_mod_timer(&scic->power_control.timer,
			      SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		scic->power_control.timer_started = true;
	} else {
		/* Add the phy in the waiting list */
		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
		scic->power_control.phys_waiting++;
	}
}
/**
 * This method removes the phy from the stagger spinup control queue.
 * @scic: the controller that owns the power control state.
 * @sci_phy: the phy to remove from the waiting list.
 */
void scic_sds_controller_power_control_queue_remove(
		struct scic_sds_controller *scic,
		struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
		scic->power_control.phys_waiting--;
	}

	scic->power_control.requesters[sci_phy->phy_index] = NULL;
}
#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters
 */
static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0()) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
	else if (is_b0() || is_c0())
		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);
	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0() || is_c0())
		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a0() || is_a2()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0()) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c0()) {
			/* Configure transmitter SSC parameters */
			writel(0x0003000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004500, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_b0()) {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003D7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		} else {
			writel(0x000001E7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000001E4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_b0()) {
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		} else {
			writel(0x0140DF0F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x3F6F103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
{
	sci_init_timer(&scic->power_control.timer, power_control_timeout);

	memset(scic->power_control.requesters, 0,
	       sizeof(scic->power_control.requesters));

	scic->power_control.phys_waiting = 0;
	scic->power_control.phys_granted_power = 0;
}
static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
	struct sci_base_state_machine *sm = &scic->sm;
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result = SCI_FAILURE;
	unsigned long i, state, val;

	if (scic->sm.current_state_id != SCIC_RESET) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCIC_INITIALIZING);

	sci_init_timer(&scic->phy_timer, phy_startup_timeout);

	scic->next_phy_to_start = 0;
	scic->phy_startup_timer_pending = false;

	scic_sds_controller_initialize_power_control(scic);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * @todo The AFE settings are supposed to be correct for the B0 but
	 * presently they seem to be wrong.
	 */
	scic_sds_controller_afe_initialization(scic);

	/* Take the hardware out of reset */
	writel(0, &scic->smu_registers->soft_reset_control);

	/*
	 * @todo Provide meaningful error code for hardware failure
	 * result = SCI_FAILURE_CONTROLLER_HARDWARE;
	 */
	for (i = 100; i >= 1; i--) {
		u32 status;

		/* Loop until the hardware reports success */
		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&scic->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}
	if (i == 0)
		goto out;

	/*
	 * Determine what are the actual device capacities that the
	 * hardware will support
	 */
	val = readl(&scic->smu_registers->device_context_capacity);

	/* Record the smaller of the two capacity values */
	scic->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
	scic->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
	scic->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);

	/*
	 * Make all PEs that are unassigned match up with the
	 * logical ports
	 */
	for (i = 0; i < scic->logical_port_entries; i++) {
		struct scu_port_task_scheduler_group_registers __iomem
			*ptsg = &scic->scu_registers->peg0.ptsg;

		writel(i, &ptsg->protocol_engine[i]);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	val = readl(&scic->scu_registers->sdma.pdma_configuration);
	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &scic->scu_registers->sdma.pdma_configuration);

	val = readl(&scic->scu_registers->sdma.cdma_configuration);
	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &scic->scu_registers->sdma.cdma_configuration);

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		result = scic_sds_phy_initialize(&ihost->phys[i].sci,
						 &scic->scu_registers->peg0.pe[i].tl,
						 &scic->scu_registers->peg0.pe[i].ll);
		if (result != SCI_SUCCESS)
			goto out;
	}

	for (i = 0; i < scic->logical_port_entries; i++) {
		result = scic_sds_port_initialize(&ihost->ports[i].sci,
						  &scic->scu_registers->peg0.ptsg.port[i],
						  &scic->scu_registers->peg0.ptsg.protocol_engine,
						  &scic->scu_registers->peg0.viit[i]);

		if (result != SCI_SUCCESS)
			goto out;
	}

	result = scic_sds_port_configuration_agent_initialize(scic, &scic->port_agent);

 out:
	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCIC_INITIALIZED;
	else
		state = SCIC_FAILED;
	sci_change_state(sm, state);

	return result;
}
static enum sci_status scic_user_parameters_set(
		struct scic_sds_controller *scic,
		union scic_user_parameters *scic_parms)
{
	u32 state = scic->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters. If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &scic_parms->sds1.phys[index];

			if (!((user_phy->max_speed_generation <=
			       SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
			       SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency < 3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->notify_enable_spin_up_insertion_frequency == 0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}
static int scic_controller_mem_init(struct scic_sds_controller *scic)
{
	struct device *dev = scic_to_dev(scic);
	dma_addr_t dma;
	size_t size;
	int err;

	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
	scic->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!scic->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma), &scic->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma), &scic->smu_registers->completion_queue_upper);

	size = scic->remote_node_entries * sizeof(union scu_remote_node_context);
	scic->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
							      GFP_KERNEL);
	if (!scic->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &scic->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma), &scic->smu_registers->remote_node_context_upper);

	size = scic->task_context_entries * sizeof(struct scu_task_context);
	scic->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!scic->task_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper);

	err = scic_sds_unsolicited_frame_control_construct(scic);
	if (err)
		return err;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(scic->uf_control.headers.physical_address),
	       &scic->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(scic->uf_control.headers.physical_address),
	       &scic->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(scic->uf_control.address_table.physical_address),
	       &scic->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(scic->uf_control.address_table.physical_address),
	       &scic->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
int isci_host_init(struct isci_host *isci_host)
{
	int err = 0, i;
	enum sci_status status;
	union scic_oem_parameters oem;
	union scic_user_parameters scic_user_params;
	struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);

	spin_lock_init(&isci_host->state_lock);
	spin_lock_init(&isci_host->scic_lock);
	spin_lock_init(&isci_host->queue_lock);
	init_waitqueue_head(&isci_host->eventq);

	isci_host_change_state(isci_host, isci_starting);
	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

	status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
					   smu_base(isci_host));

	if (status != SCI_SUCCESS) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	isci_host->sas_ha.dev = &isci_host->pdev->dev;
	isci_host->sas_ha.lldd_ha = isci_host;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(isci_host, &scic_user_params);
	status = scic_user_parameters_set(&isci_host->sci,
					  &scic_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	scic_oem_parameters_get(&isci_host->sci, &oem);

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&oem,
						   pci_info->orom,
						   isci_host->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = scic_oem_parameters_set(&isci_host->sci, &oem);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_oem_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	tasklet_init(&isci_host->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)isci_host);

	INIT_LIST_HEAD(&isci_host->requests_to_complete);
	INIT_LIST_HEAD(&isci_host->requests_to_errorback);

	spin_lock_irq(&isci_host->scic_lock);
	status = scic_controller_initialize(&isci_host->sci);
	spin_unlock_irq(&isci_host->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = scic_controller_mem_init(&isci_host->sci);
	if (err)
		return err;

	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
					       sizeof(struct isci_request),
					       SLAB_HWCACHE_ALIGN, 0);
	if (!isci_host->dma_pool)
		return -ENOMEM;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&isci_host->ports[i], isci_host, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&isci_host->phys[i], isci_host, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &isci_host->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
		spin_lock_init(&idev->state_lock);
	}

	return 0;
}
void scic_sds_controller_link_up(struct scic_sds_controller *scic,
				 struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->sm.current_state_id) {
	case SCIC_STARTING:
		sci_del_timer(&scic->phy_timer);
		scic->phy_startup_timer_pending = false;
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		scic_sds_controller_start_next_phy(scic);
		break;
	case SCIC_READY:
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, phy->phy_index,
			scic->sm.current_state_id);
	}
}

void scic_sds_controller_link_down(struct scic_sds_controller *scic,
				   struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->sm.current_state_id) {
	case SCIC_STARTING:
	case SCIC_READY:
		scic->port_agent.link_down_handler(scic, &scic->port_agent,
						   port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			phy->phy_index,
			scic->sm.current_state_id);
	}
}
/**
 * This is a helper method to determine if any remote devices on this
 * controller are still in the stopping state.
 */
static bool scic_sds_controller_has_remote_devices_stopping(
		struct scic_sds_controller *controller)
{
	u32 index;

	for (index = 0; index < controller->remote_node_entries; index++) {
		if ((controller->device_table[index] != NULL) &&
		    (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
			return true;
	}

	return false;
}
/**
 * This method is called by the remote device to inform the controller
 * object that the remote device has stopped.
 */
void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev)
{
	if (scic->sm.current_state_id != SCIC_STOPPING) {
		dev_dbg(scic_to_dev(scic),
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			scic, sci_dev,
			scic->sm.current_state_id);
		return;
	}

	if (!scic_sds_controller_has_remote_devices_stopping(scic))
		sci_change_state(&scic->sm, SCIC_STOPPED);
}
/**
 * This method will write the request value to the SCU PCP register. The
 * method is used to suspend/resume ports, devices, and phys.
 * @scic: the controller on which to post the request.
 * @request: the post context command to write.
 */
void scic_sds_controller_post_request(
		struct scic_sds_controller *scic,
		u32 request)
{
	dev_dbg(scic_to_dev(scic),
		"%s: SCIC Controller 0x%p post request 0x%08x\n",
		__func__,
		scic,
		request);

	writel(request, &scic->smu_registers->post_context_port);
}
/**
 * This method will copy the soft copy of the task context into the physical
 * memory accessible by the controller.
 * @scic: This parameter specifies the controller for which to copy
 *    the task context.
 * @sci_req: This parameter specifies the request for which the task
 *    context is being copied.
 *
 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
 * the physical memory version of the task context. Thus, all subsequent
 * updates to the task context are performed in the TC table (i.e. DMAable
 * memory).
 */
void scic_sds_controller_copy_task_context(
		struct scic_sds_controller *scic,
		struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context_buffer;

	task_context_buffer = scic_sds_controller_get_task_context_buffer(
		scic, sci_req->io_tag);

	memcpy(task_context_buffer,
	       sci_req->task_context_buffer,
	       offsetof(struct scu_task_context, sgl_snapshot_ac));

	/*
	 * Now that the soft copy of the TC has been copied into the TC
	 * table accessible by the silicon, any further changes to the TC
	 * (e.g. TC termination) occur in the appropriate location.
	 */
	sci_req->task_context_buffer = task_context_buffer;
}
struct scu_task_context *scic_sds_controller_get_task_context_buffer(struct scic_sds_controller *scic,
								     u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);

	if (tci < scic->task_context_entries)
		return &scic->task_context_table[tci];

	return NULL;
}

struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = ISCI_TAG_TCI(io_tag);

	if (task_index < scic->task_context_entries) {
		if (scic->io_request_table[task_index] != NULL) {
			task_sequence = ISCI_TAG_SEQ(io_tag);

			if (task_sequence == scic->io_request_sequence[task_index])
				return scic->io_request_table[task_index];
		}
	}

	return NULL;
}
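
/*
 * Note on the io_tag layout (illustrative sketch, assuming the ISCI_TAG
 * helpers pack a small sequence count above the task context index; the
 * exact field widths live with the macro definitions, not here):
 *
 *	u16 tci = ISCI_TAG_TCI(io_tag);    indexes task_context_table
 *	u16 seq = ISCI_TAG_SEQ(io_tag);    generation count for that tci
 *
 * scic_request_by_tag() above only returns a request when the tag's
 * sequence matches io_request_sequence[task_index]; this is what keeps a
 * stale tag from matching a recycled task context index.
 */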
/**
 * This method allocates a remote node index and reserves the remote node
 * context space for use. This method can fail if there are no more remote
 * node indexes available.
 * @scic: This is the controller object which contains the set of
 *    free remote node ids
 * @sci_dev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no
 * available remote node indexes.
 */
enum sci_status scic_sds_controller_allocate_remote_node_context(
		struct scic_sds_controller *scic,
		struct scic_sds_remote_device *sci_dev,
		u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	node_index = scic_sds_remote_node_table_allocate_remote_node(
		&scic->available_remote_nodes, remote_node_count);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		scic->device_table[node_index] = sci_dev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
/**
 * This method frees the remote node index back to the available pool. Once
 * this is done the remote node context buffer is no longer valid and cannot
 * be used.
 * @scic: the controller that owns the remote node pool.
 * @sci_dev: the device releasing its remote node context.
 * @node_id: the remote node id being released.
 */
void scic_sds_controller_free_remote_node_context(
		struct scic_sds_controller *scic,
		struct scic_sds_remote_device *sci_dev,
		u16 node_id)
{
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	if (scic->device_table[node_id] == sci_dev) {
		scic->device_table[node_id] = NULL;

		scic_sds_remote_node_table_release_remote_node_index(
			&scic->available_remote_nodes, remote_node_count, node_id);
	}
}
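
/*
 * Typical pairing (illustrative sketch only; the enclosing caller and its
 * error handling are hypothetical, not code from this driver):
 *
 *	u16 node_id;
 *	enum sci_status status;
 *
 *	status = scic_sds_controller_allocate_remote_node_context(scic,
 *								  sci_dev,
 *								  &node_id);
 *	if (status != SCI_SUCCESS)
 *		return status;     out of remote node indexes
 *	...use node_id and its context buffer for the device's lifetime...
 *	scic_sds_controller_free_remote_node_context(scic, sci_dev, node_id);
 */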
/**
 * This method returns the union scu_remote_node_context for the specified
 * remote node id.
 * @scic: the controller that owns the remote node context table.
 * @node_id: the remote node id to look up.
 *
 * union scu_remote_node_context*
 */
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
		struct scic_sds_controller *scic,
		u16 node_id)
{
	if ((node_id < scic->remote_node_entries) &&
	    (scic->device_table[node_id] != NULL))
		return &scic->remote_node_context_table[node_id];

	return NULL;
}
/**
 * @response_buffer: This is the buffer into which the D2H register FIS will
 *    be constructed.
 * @frame_header: This is the frame header returned by the hardware.
 * @frame_buffer: This is the frame buffer returned by the hardware.
 *
 * This method will combine the frame header and frame buffer to create a
 * SATA D2H register FIS.
 */
void scic_sds_controller_copy_sata_response(
		void *response_buffer,
		void *frame_header,
		void *frame_buffer)
{
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}
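
/*
 * Layout sketch (informational; byte counts assume the 20-byte D2H register
 * FIS defined by struct dev_to_host_fis):
 *
 *	response_buffer[0..3]	first FIS dword, taken from the unsolicited
 *				frame header (FIS type, flags, status, error)
 *	response_buffer[4..19]	remaining four dwords, taken from the frame
 *				buffer (LBA, device, count, etc.)
 *
 * In other words, the hardware splits a received FIS between the frame
 * header and the frame body, and this helper stitches it back together.
 */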
/**
 * This method releases the frame; once this is done the frame is available
 * for re-use by the hardware. The data contained in the frame header and
 * frame buffer is no longer valid. The UF queue get pointer is only updated
 * if UF control indicates this is appropriate.
 * @scic: the controller that owns the unsolicited frame queue.
 * @frame_index: the index of the frame being released.
 */
void scic_sds_controller_release_frame(
		struct scic_sds_controller *scic,
		u32 frame_index)
{
	if (scic_sds_unsolicited_frame_control_release_frame(
		    &scic->uf_control, frame_index))
		writel(scic->uf_control.get,
		       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}
/**
 * scic_controller_start_io() - This method is called by the SCI user to
 *    send/start an IO request. If the method invocation is successful, then
 *    the IO request has been queued to the hardware for processing.
 * @controller: the handle to the controller object for which to start an IO
 *    request.
 * @remote_device: the handle to the remote device object for which to start an
 *    IO request.
 * @io_request: the handle to the io request object to start.
 * @io_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request. This parameter is optional.
 *    The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
 *    for this parameter.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner. This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a
 * result, it is expected the user will have set the NCQ tag field in the host
 * to device register FIS prior to calling this method. There is also a
 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
 * the scic_controller_start_io() method. See scic_controller_allocate_tag()
 * for more information on allocating a tag. Indicate if the controller
 * successfully started the IO request. SCI_SUCCESS if the IO request was
 * successfully started. @todo Determine the failure situations and return
 * values.
 */
enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
					 struct scic_sds_remote_device *rdev,
					 struct scic_sds_request *req,
					 u16 io_tag)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_io(scic, rdev, req);
	if (status != SCI_SUCCESS)
		return status;

	scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
	return SCI_SUCCESS;
}
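
/*
 * Caller pattern sketch (illustrative only; scic_lock usage mirrors the
 * callers elsewhere in this driver, but this exact snippet is hypothetical):
 *
 *	spin_lock_irqsave(&ihost->scic_lock, flags);
 *	status = scic_controller_start_io(&ihost->sci, &idev->sci,
 *					  &ireq->sci, io_tag);
 *	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 *
 * The lock provides the mutual exclusion the kernel-doc above demands for
 * tag handling; on SCI_SUCCESS the request is owned by the hardware until
 * scic_controller_complete_io() (or a terminate) retires it.
 */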
/**
 * scic_controller_terminate_request() - This method is called by the SCI Core
 *    user to terminate an ongoing (i.e. started) core IO request. This does
 *    not abort the IO request at the target, but rather removes the IO
 *    request from the host controller.
 * @controller: the handle to the controller object for which to terminate a
 *    request.
 * @remote_device: the handle to the remote device object for which to
 *    terminate a request.
 * @request: the handle to the io or task management request object to
 *    terminate.
 *
 * Indicate if the controller successfully began the terminate process for the
 * IO request. SCI_SUCCESS if the terminate process was successfully started
 * for the request. @todo Determine the failure situations and return values.
 */
enum sci_status scic_controller_terminate_request(
		struct scic_sds_controller *scic,
		struct scic_sds_remote_device *rdev,
		struct scic_sds_request *req)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_io_request_terminate(req);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and OR in the
	 * POST_TC_ABORT request sub-type.
	 */
	scic_sds_controller_post_request(scic,
					 scic_sds_request_get_post_context(req) |
					 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

/**
 * scic_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request. After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @scic: The handle to the controller object for which to complete the IO
 *    request.
 * @rdev: The handle to the remote device object for which to complete the IO
 *    request.
 * @request: the handle to the io request object to complete.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 *   to ensure that the methods that may allocate or free available IO tags
 *   are invoked in a mutually exclusive manner. This method is one of said
 *   methods requiring proper critical code section protection (e.g.
 *   semaphore, spin-lock, etc.).
 * - If the IO tag for a request was allocated by the SCI Core user using the
 *   scic_controller_allocate_io_tag() method, then it is the responsibility
 *   of the caller to invoke the scic_controller_free_io_tag() method to free
 *   the tag (i.e. this method will not free the IO tag).
 *
 * Returns SCI_SUCCESS if the completion process was successful.
 */
enum sci_status scic_controller_complete_io(struct scic_sds_controller *scic,
                                            struct scic_sds_remote_device *rdev,
                                            struct scic_sds_request *request)
{
        enum sci_status status;
        u16 index;

        switch (scic->sm.current_state_id) {
        case SCIC_STOPPING:
                /* XXX: Implement this function */
                return SCI_FAILURE;
        case SCIC_READY:
                status = scic_sds_remote_device_complete_io(scic, rdev, request);
                if (status != SCI_SUCCESS)
                        return status;

                index = ISCI_TAG_TCI(request->io_tag);
                scic->io_request_table[index] = NULL;
                return SCI_SUCCESS;
        default:
                dev_warn(scic_to_dev(scic), "invalid state to complete I/O\n");
                return SCI_FAILURE_INVALID_STATE;
        }
}
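
/*
 * Example (illustrative sketch): completing an IO whose tag was explicitly
 * allocated by the caller. Since scic_controller_complete_io() does not
 * free the tag, a caller that used scic_controller_allocate_io_tag() must
 * pair the completion with scic_controller_free_io_tag():
 *
 *        status = scic_controller_complete_io(scic, rdev, req);
 *        if (status == SCI_SUCCESS &&
 *            tag != SCI_CONTROLLER_INVALID_IO_TAG)
 *                scic_controller_free_io_tag(scic, tag);
 */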

enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
{
        struct scic_sds_controller *scic = sci_req->owning_controller;

        if (scic->sm.current_state_id != SCIC_READY) {
                dev_warn(scic_to_dev(scic), "invalid state to continue I/O\n");
                return SCI_FAILURE_INVALID_STATE;
        }

        scic->io_request_table[ISCI_TAG_TCI(sci_req->io_tag)] = sci_req;
        scic_sds_controller_post_request(scic,
                                         scic_sds_request_get_post_context(sci_req));
        return SCI_SUCCESS;
}

/**
 * scic_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @scic: the handle to the controller object for which to start the task
 *    management request.
 * @rdev: the handle to the remote device object for which to start the task
 *    management request.
 * @req: the handle to the task request object to start.
 * @task_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request. Note that this is not the
 *    io_tag of the request being managed; it is to be utilized for the task
 *    request itself. This parameter is optional. The user is allowed to
 *    supply SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 *   to ensure that the methods that may allocate or free available IO tags
 *   are invoked in a mutually exclusive manner. This method is one of said
 *   methods requiring proper critical code section protection (e.g.
 *   semaphore, spin-lock, etc.).
 * - The user must synchronize this task with completion queue processing. If
 *   they are not synchronized then it is possible for the IO requests being
 *   managed by the task request to complete before the task request itself
 *   is started.
 *
 * See scic_controller_allocate_io_tag() for more information on allocating a
 * tag. Returns SCI_TASK_SUCCESS if the task request was successfully
 * started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT is returned if there are
 * tasks outstanding that require termination or completion before this
 * request can succeed.
 */
enum sci_task_status scic_controller_start_task(struct scic_sds_controller *scic,
                                                struct scic_sds_remote_device *rdev,
                                                struct scic_sds_request *req,
                                                u16 task_tag)
{
        enum sci_status status;

        if (scic->sm.current_state_id != SCIC_READY) {
                dev_warn(scic_to_dev(scic),
                         "%s: SCIC Controller starting task from invalid state\n",
                         __func__);
                return SCI_TASK_FAILURE_INVALID_STATE;
        }

        status = scic_sds_remote_device_start_task(scic, rdev, req);
        switch (status) {
        case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
                scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req;
                /*
                 * We will let the framework know this task request started
                 * successfully, although the core is still working on
                 * starting the request (to post the TC when the RNC is
                 * resumed).
                 */
                return SCI_SUCCESS;
        case SCI_SUCCESS:
                scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req;
                scic_sds_controller_post_request(scic,
                                                 scic_sds_request_get_post_context(req));
                break;
        default:
                break;
        }

        return status;
}
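
/*
 * Example (illustrative sketch): starting a task management request. The
 * same lock that serializes completion queue processing is taken here so
 * that the IOs being managed cannot complete before the task request has
 * been started; the lock name remains an assumption for illustration.
 *
 *        enum sci_task_status task_status;
 *
 *        spin_lock_irqsave(&ihost->scic_lock, flags);
 *        task_status = scic_controller_start_task(scic, rdev, treq,
 *                                                 SCI_CONTROLLER_INVALID_IO_TAG);
 *        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 */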

/**
 * scic_controller_allocate_io_tag() - This method will allocate a tag from
 *    the pool of free IO tags. Direct allocation of IO tags by the SCI Core
 *    user is optional. The scic_controller_start_io() method will allocate
 *    an IO tag if this method is not utilized and the tag is not supplied to
 *    the IO construct routine. Direct allocation of IO tags may provide
 *    additional performance improvements in environments capable of
 *    supporting this usage model. It also provides additional flexibility to
 *    the SCI Core user; specifically, the user may retain IO tags across the
 *    lives of multiple IO requests.
 * @scic: the handle to the controller object for which to allocate the tag.
 *
 * IO tags are a protected resource. It is incumbent upon the SCI Core user
 * to ensure that the methods that may allocate or free available IO tags are
 * invoked in a mutually exclusive manner. This method is one of said methods
 * requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.).
 *
 * Returns an unsigned integer representing an available IO tag.
 * SCI_CONTROLLER_INVALID_IO_TAG is returned if there are no currently
 * available tags to be allocated. All other return values indicate a
 * legitimate tag.
 */
u16 scic_controller_allocate_io_tag(struct scic_sds_controller *scic)
{
        struct isci_host *ihost = scic_to_ihost(scic);

        if (isci_tci_space(ihost)) {
                u16 tci = isci_tci_alloc(ihost);
                u8 seq = scic->io_request_sequence[tci];

                return ISCI_TAG(seq, tci);
        }

        return SCI_CONTROLLER_INVALID_IO_TAG;
}
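
/*
 * Example (illustrative sketch): direct allocation with an exhaustion check
 * before handing the tag to the IO construct routine.
 *
 *        u16 tag = scic_controller_allocate_io_tag(scic);
 *
 *        if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *                return -EBUSY;      (no free task contexts; retry later)
 */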

/**
 * scic_controller_free_io_tag() - This method will free an IO tag to the
 *    pool of free IO tags. This method provides the SCI Core user more
 *    flexibility with regards to IO tags. The user may desire to keep an IO
 *    tag after an IO request has completed, because they plan on re-using
 *    the tag for a subsequent IO request. This method is only legal if the
 *    tag was allocated via scic_controller_allocate_io_tag().
 * @scic: This parameter specifies the handle to the controller object for
 *    which to free/return the tag.
 * @io_tag: This parameter represents the tag to be freed to the pool of
 *    available tags.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 *   to ensure that the methods that may allocate or free available IO tags
 *   are invoked in a mutually exclusive manner. This method is one of said
 *   methods requiring proper critical code section protection (e.g.
 *   semaphore, spin-lock, etc.).
 * - If the IO tag for a request was allocated by the SCI Core user using the
 *   scic_controller_allocate_io_tag() method, then it is the responsibility
 *   of the caller to invoke this method to free the tag.
 *
 * Returns SCI_SUCCESS if the tag was successfully placed back into the pool
 * of available IO tags. SCI_FAILURE_INVALID_IO_TAG is returned if the
 * supplied tag is not a valid IO tag value.
 */
enum sci_status scic_controller_free_io_tag(struct scic_sds_controller *scic,
                                            u16 io_tag)
{
        struct isci_host *ihost = scic_to_ihost(scic);
        u16 tci = ISCI_TAG_TCI(io_tag);
        u16 seq = ISCI_TAG_SEQ(io_tag);

        /* prevent tail from passing head */
        if (isci_tci_active(ihost) == 0)
                return SCI_FAILURE_INVALID_IO_TAG;

        if (seq == scic->io_request_sequence[tci]) {
                scic->io_request_sequence[tci] = (seq + 1) & (SCI_MAX_SEQ - 1);
                isci_tci_free(ihost, tci);
                return SCI_SUCCESS;
        }

        return SCI_FAILURE_INVALID_IO_TAG;
}
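
/*
 * Worked example of the sequence check above (illustrative): the tag packs
 * a per-slot sequence number alongside the task context index (TCI), so a
 * stale duplicate of an already-freed tag is rejected rather than
 * double-freed. With io_request_sequence[5] == 3:
 *
 *        u16 tag = ISCI_TAG(3, 5);                 (seq 3, TCI 5)
 *
 *        scic_controller_free_io_tag(scic, tag);   (SCI_SUCCESS; slot seq -> 4)
 *        scic_controller_free_io_tag(scic, tag);   (SCI_FAILURE_INVALID_IO_TAG)
 */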