mv_sas.c

/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"
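
/*
 * Note (editorial summary of the helpers below): slot tags are allocated
 * from the mvi->tags bitmap and index both mvi->slot_info[] and the
 * hardware command header ring (mvi->slot[]).
 */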

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}

	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;

			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;
	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been free.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;
	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;
	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -ENOSYS;
	}
	msleep(200);
	return rc;
}

void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
				u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}

static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
		}
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}
	mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
	mvs_prv->scan_finished = 1;
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	if (mvs_prv->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_ha_struct *sha = mvi->sas;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(MVS_PHY_ID << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}

static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
			qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}

static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_ha_struct *sha = mvi->sas;
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("Have not enough regiset for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(MVS_PHY_ID << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}

static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}
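
/*
 * Note (editorial summary): mvs_task_prep() below allocates a slot tag and
 * the per-slot DMA buffer, dispatches to the per-protocol prep helper above,
 * and advances the TX producer index; the caller (mvs_task_exec() or
 * mvs_collector_task_exec()) then kicks the hardware via
 * MVS_CHIP_DISP->start_delivery().
 */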

#define DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (dev->dev_type != SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;

			mv_dprintk("SATA/STP port %d does not attach"
					"device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;

			mv_dprintk("SAS port %d does not attach"
				"device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf)
		goto err_out_tag;
	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
{
	struct mvs_task_list *first = NULL;

	for (; *num > 0; --*num) {
		struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);

		if (!mvs_list)
			break;

		INIT_LIST_HEAD(&mvs_list->list);
		if (!first)
			first = mvs_list;
		else
			list_add_tail(&mvs_list->list, &first->list);
	}

	return first;
}

static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
{
	LIST_HEAD(list);
	struct list_head *pos, *a;
	struct mvs_task_list *mlist = NULL;

	__list_add(&list, mvs_list->list.prev, &mvs_list->list);

	list_for_each_safe(pos, a, &list) {
		list_del_init(pos);
		mlist = list_entry(pos, struct mvs_task_list, list);
		kmem_cache_free(mvs_task_list_cache, mlist);
	}
}

static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
	struct mvs_info *mvi = NULL;
	struct sas_task *t = task;
	struct mvs_task_list *mvs_list = NULL, *a;
	LIST_HEAD(q);
	int pass[2] = {0};
	u32 rc = 0;
	u32 n = num;
	unsigned long flags = 0;

	mvs_list = mvs_task_alloc_list(&n, gfp_flags);
	if (n) {
		printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
		rc = -ENOMEM;
		goto free_list;
	}

	__list_add(&q, mvs_list->list.prev, &mvs_list->list);

	list_for_each_entry(a, &q, list) {
		a->task = t;
		t = list_entry(t->list.next, struct sas_task, list);
	}

	list_for_each_entry(a, &q, list) {
		t = a->task;
		mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;

		spin_lock_irqsave(&mvi->lock, flags);
		rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
		if (rc)
			dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}

	if (likely(pass[0]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
			(mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	if (likely(pass[1]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
			(mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	list_del_init(&q);

free_list:
	if (mvs_list)
		mvs_task_free_list(mvs_list);

	return rc;
}

int mvs_queue_command(struct sas_task *task, const int num,
			gfp_t gfp_flags)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;

	if (sas->lldd_max_execute_num < 2)
		return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
	else
		return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}
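
/*
 * Note (editorial summary): mvs_update_wideport() below pushes the current
 * wide-port phy map to each phy covered by port->wide_port_phymap via the
 * PHYR_WIDE_PORT register; phys no longer in the map get 0 written.
 */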

static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}

static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				mvs_sig_remove_timer(phy);
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("phy %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("phy %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}

static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL; int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;
	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (i >= mvi->chip->n_phy)
		port = &mvi->port[i - mvi->chip->n_phy];
	else
		port = &mvi->port[i];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
		}
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, dev);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}

struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == NO_DEVICE) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("max support %d devices, ignore ..\n",
			MVS_MAX_DEVICES);

	return NULL;
}

void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = NO_DEVICE;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}

int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx"
				"at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}

int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	if (mvi_dev) {
		mv_dprintk("found dev[%d:%x] is gone.\n",
			mvi_dev->device_id, mvi_dev->dev_type);
		mvs_release_task(mvi, dev);
		mvs_free_reg_set(mvi, mvi_dev);
		mvs_free_dev(mvi_dev);
	} else {
		mv_dprintk("found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	mvi_dev->sas_device = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}

static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void mvs_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define MVS_TASK_TIMEOUT 20
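
/*
 * Note (editorial summary): internal TMFs below are issued through a libsas
 * slow task guarded by a MVS_TASK_TIMEOUT-second timer; on timeout
 * mvs_tmf_timedout() marks the task aborted and completes it, and the
 * request is retried up to three times.
 */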
  1170. static int mvs_exec_internal_tmf_task(struct domain_device *dev,
  1171. void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
  1172. {
  1173. int res, retry;
  1174. struct sas_task *task = NULL;
  1175. for (retry = 0; retry < 3; retry++) {
  1176. task = sas_alloc_slow_task(GFP_KERNEL);
  1177. if (!task)
  1178. return -ENOMEM;
  1179. task->dev = dev;
  1180. task->task_proto = dev->tproto;
  1181. memcpy(&task->ssp_task, parameter, para_len);
  1182. task->task_done = mvs_task_done;
  1183. task->slow_task->timer.data = (unsigned long) task;
  1184. task->slow_task->timer.function = mvs_tmf_timedout;
  1185. task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
  1186. add_timer(&task->slow_task->timer);
  1187. res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
  1188. if (res) {
  1189. del_timer(&task->slow_task->timer);
  1190. mv_printk("executing internel task failed:%d\n", res);
  1191. goto ex_err;
  1192. }
  1193. wait_for_completion(&task->slow_task->completion);
  1194. res = TMF_RESP_FUNC_FAILED;
  1195. /* Even TMF timed out, return direct. */
  1196. if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
  1197. if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
  1198. mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
  1199. goto ex_err;
  1200. }
  1201. }
  1202. if (task->task_status.resp == SAS_TASK_COMPLETE &&
  1203. task->task_status.stat == SAM_STAT_GOOD) {
  1204. res = TMF_RESP_FUNC_COMPLETE;
  1205. break;
  1206. }
  1207. if (task->task_status.resp == SAS_TASK_COMPLETE &&
  1208. task->task_status.stat == SAS_DATA_UNDERRUN) {
  1209. /* no error, but return the number of bytes of
  1210. * underrun */
  1211. res = task->task_status.residual;
  1212. break;
  1213. }
  1214. if (task->task_status.resp == SAS_TASK_COMPLETE &&
  1215. task->task_status.stat == SAS_DATA_OVERRUN) {
  1216. mv_dprintk("blocked task error.\n");
  1217. res = -EMSGSIZE;
  1218. break;
  1219. } else {
  1220. mv_dprintk(" task to dev %016llx response: 0x%x "
  1221. "status 0x%x\n",
  1222. SAS_ADDR(dev->sas_addr),
  1223. task->task_status.resp,
  1224. task->task_status.stat);
  1225. sas_free_task(task);
  1226. task = NULL;
  1227. }
  1228. }
  1229. ex_err:
  1230. BUG_ON(retry == 3 && task != NULL);
  1231. sas_free_task(task);
  1232. return res;
  1233. }
  1234. static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
  1235. u8 *lun, struct mvs_tmf_task *tmf)
  1236. {
  1237. struct sas_ssp_task ssp_task;
  1238. if (!(dev->tproto & SAS_PROTOCOL_SSP))
  1239. return TMF_RESP_FUNC_ESUPP;
  1240. memcpy(ssp_task.LUN, lun, 8);
  1241. return mvs_exec_internal_tmf_task(dev, &ssp_task,
  1242. sizeof(ssp_task), tmf);
  1243. }
  1244. /* Standard mandates link reset for ATA (type 0)
  1245. and hard reset for SSP (type 1) , only for RECOVERY */
  1246. static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
  1247. {
  1248. int rc;
  1249. struct sas_phy *phy = sas_get_local_phy(dev);
  1250. int reset_type = (dev->dev_type == SATA_DEV ||
  1251. (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
  1252. rc = sas_phy_reset(phy, reset_type);
  1253. sas_put_local_phy(phy);
  1254. msleep(2000);
  1255. return rc;
  1256. }
  1257. /* mandatory SAM-3 */
  1258. int mvs_lu_reset(struct domain_device *dev, u8 *lun)
  1259. {
  1260. unsigned long flags;
  1261. int rc = TMF_RESP_FUNC_FAILED;
  1262. struct mvs_tmf_task tmf_task;
  1263. struct mvs_device * mvi_dev = dev->lldd_dev;
  1264. struct mvs_info *mvi = mvi_dev->mvi_info;
  1265. tmf_task.tmf = TMF_LU_RESET;
  1266. mvi_dev->dev_status = MVS_DEV_EH;
  1267. rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
  1268. if (rc == TMF_RESP_FUNC_COMPLETE) {
  1269. spin_lock_irqsave(&mvi->lock, flags);
  1270. mvs_release_task(mvi, dev);
  1271. spin_unlock_irqrestore(&mvi->lock, flags);
  1272. }
  1273. /* If failed, fall-through I_T_Nexus reset */
  1274. mv_printk("%s for device[%x]:rc= %d\n", __func__,
  1275. mvi_dev->device_id, rc);
  1276. return rc;
  1277. }
  1278. int mvs_I_T_nexus_reset(struct domain_device *dev)
  1279. {
  1280. unsigned long flags;
  1281. int rc = TMF_RESP_FUNC_FAILED;
  1282. struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
  1283. struct mvs_info *mvi = mvi_dev->mvi_info;
  1284. if (mvi_dev->dev_status != MVS_DEV_EH)
  1285. return TMF_RESP_FUNC_COMPLETE;
  1286. else
  1287. mvi_dev->dev_status = MVS_DEV_NORMAL;
  1288. rc = mvs_debug_I_T_nexus_reset(dev);
  1289. mv_printk("%s for device[%x]:rc= %d\n",
  1290. __func__, mvi_dev->device_id, rc);
  1291. spin_lock_irqsave(&mvi->lock, flags);
  1292. mvs_release_task(mvi, dev);
  1293. spin_unlock_irqrestore(&mvi->lock, flags);
  1294. return rc;
  1295. }
  1296. /* optional SAM-3 */
  1297. int mvs_query_task(struct sas_task *task)
  1298. {
  1299. u32 tag;
  1300. struct scsi_lun lun;
  1301. struct mvs_tmf_task tmf_task;
  1302. int rc = TMF_RESP_FUNC_FAILED;
  1303. if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
  1304. struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
  1305. struct domain_device *dev = task->dev;
  1306. struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
  1307. struct mvs_info *mvi = mvi_dev->mvi_info;
  1308. int_to_scsilun(cmnd->device->lun, &lun);
  1309. rc = mvs_find_tag(mvi, task, &tag);
  1310. if (rc == 0) {
  1311. rc = TMF_RESP_FUNC_FAILED;
  1312. return rc;
  1313. }
  1314. tmf_task.tmf = TMF_QUERY_TASK;
  1315. tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
  1316. rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
  1317. switch (rc) {
  1318. /* The task is still in Lun, release it then */
  1319. case TMF_RESP_FUNC_SUCC:
  1320. /* The task is not in Lun or failed, reset the phy */
  1321. case TMF_RESP_FUNC_FAILED:
  1322. case TMF_RESP_FUNC_COMPLETE:
  1323. break;
  1324. }
  1325. }
  1326. mv_printk("%s:rc= %d\n", __func__, rc);
  1327. return rc;
  1328. }
  1329. /* mandatory SAM-3, still need free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, complete the slot so the task is freed */
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			u32 slot_idx = (u32)(slot - mvi->slot_info);

			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
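
/*
 * The next three handlers are thin wrappers: each fills in its TMF code
 * (ABORT TASK SET, CLEAR ACA, CLEAR TASK SET) and forwards the request to
 * mvs_debug_issue_ssp_tmf() for the given device and LUN.
 */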
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}
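
/*
 * mvs_sata_done() - copy the D2H FIS received for a SATA/STP command into
 * the libsas ata_task_resp buffer and translate the hardware error bits
 * into a SAS status code.
 */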
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);

	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}
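
/*
 * mvs_set_sense() - build a SCSI sense buffer for the given key/ASC/ASCQ.
 * d_sense selects descriptor format (response code 0x72) versus fixed
 * format (response code 0x70); a buffer that is too small is reported but
 * still filled as far as it goes.
 *
 * Illustrative only (not called this way in this file): a fixed-format
 * "medium not present" sense could be built along the lines of
 *
 *	u8 sense[SCSI_SENSE_BUFFERSIZE];
 *	mvs_set_sense(sense, sizeof(sense), 0, NOT_READY, 0x3a, 0x00);
 */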
void mvs_set_sense(u8 *buffer, int len, int d_sense,
		int key, int asc, int ascq)
{
	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}
		buffer[0] = 0x72;		/* Response Code	*/
		if (len > 1)
			buffer[1] = key;	/* Sense Key		*/
		if (len > 2)
			buffer[2] = asc;	/* ASC			*/
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ			*/
	} else {
		if (len < 14) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}
		buffer[0] = 0x70;		/* Response Code	*/
		if (len > 2)
			buffer[2] = key;	/* Sense Key		*/
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC			*/
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ			*/
	}

	return;
}
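
/*
 * mvs_fill_ssp_resp_iu() - fabricate an SSP response IU that carries
 * fixed-format sense data (datapres = 2, CHECK CONDITION status), used
 * when an error is reported without a usable response frame.
 */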
void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
		u8 key, u8 asc, u8 asc_q)
{
	iu->datapres = 2;
	iu->response_data_len = 0;
	iu->sense_data_len = 17;
	iu->status = 02;		/* CHECK CONDITION */
	mvs_set_sense(iu->sense_data, 17, 0,
		key, asc, asc_q);
}
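
/*
 * mvs_slot_err() - decode the error information record of a failed slot.
 * The first two dwords of the response buffer hold the error status; for
 * SSP a NOT READY sense is synthesized when NO_DEST (or the retry bit in
 * the second error dword) is set, apparently meaning the command never
 * reached the target; for SATA/STP the D2H FIS is returned through
 * mvs_sata_done().
 */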
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
	u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		stat = SAS_ABORTED_TASK;
		if ((err_dw0 & NO_DEST) || (err_dw1 & bit(31))) {
			struct ssp_response_iu *iu = slot->response +
				sizeof(struct mvs_err_info);
			mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
			sas_ssp_task_response(mvi->dev, task, iu);
			stat = SAM_STAT_CHECK_CONDITION;
		}
		if (err_dw1 & bit(31))
			mv_printk("reuse same slot, retry command.\n");
		break;
	}
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		task->ata_task.use_ncq = 0;
		stat = SAS_PROTO_RESPONSE;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}
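
/*
 * mvs_slot_complete() - complete one slot from the RX (completion) ring.
 * Handles the aborted-task race, missing or unplugged devices, error
 * information records, and per-protocol response parsing, then frees the
 * slot and calls task->task_done() with mvi->lock temporarily dropped.
 */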
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;
	void *to;
	enum exec_status sts;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race with the abort path, which may have set SAS_TASK_STATE_ABORTED */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	/* when no device is attached, complete via the error-handling path */
	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		mv_dprintk("port %d slot %d rx_desc %X has error info"
			"%016llX.\n", slot->port->sas_port.id, slot_idx,
			 rx_desc, (u64)(*(u64 *)slot->response));
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
					sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_STAT_GOOD;
			to = kmap_atomic(sg_page(sg_resp));
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n", slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}

out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);
	spin_lock(&mvi->lock);

	return sts;
}
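
/*
 * mvs_do_release_task() - force-complete every slot still queued on the
 * port behind @phy_no (optionally restricted to @dev).  The completion
 * queue is drained first so requests that already finished are not
 * completed twice.
 */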
void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* drain the completion queue in case a request already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}
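
/*
 * mvs_release_task() - release outstanding tasks for @dev on every phy the
 * device is reachable through (wide ports included).
 */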
void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;

	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}
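
/*
 * mvs_work_queue() - delayed-work handler for phy plug and expander
 * broadcast events queued by mvs_handle_event().  Re-checks the phy state
 * under mvi->lock and notifies libsas of loss of signal, a newly attached
 * device, or a broadcast change.
 */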
static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;
	u32 phy_no = (unsigned long) mwq->data;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {
		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;
			struct sas_identify_frame *id;

			id = (struct sas_identify_frame *)phy->frame_rcvd;
			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_ha->notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	} else if (mwq->handler & EXP_BRCT_CHG) {
		phy->phy_event &= ~EXP_BRCT_CHG;
		sas_ha->notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD);
		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}
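
/*
 * mvs_handle_event() - queue a phy/port event for deferred handling in
 * mvs_work_queue(), roughly two seconds later.  Returns -ENOMEM if the
 * work item cannot be allocated.
 */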
static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}
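
/*
 * mvs_sig_time_out() - timer callback armed when COMWAKE is seen in
 * mvs_int_port(); if the phy still has not delivered a signature FIS by
 * the time it fires, the phy is hard reset.
 */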
static void mvs_sig_time_out(unsigned long tphy)
{
	struct mvs_phy *phy = (struct mvs_phy *)tphy;
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no+mvi->id*mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
		}
	}
}
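
/*
 * mvs_int_port() - per-phy interrupt handler.  Clears the latched port
 * interrupt status and reacts to STP decoding errors, phy loss (POOF),
 * COMWAKE, signature FIS / identify done, and broadcast change events.
 */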
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * "events" only carries the port-level event; the per-port interrupt
	 * status read above tells us what actually happened on this phy.
	 */
	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
			   phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.data = (unsigned long)phy;
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* whether the driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plug-in interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}
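
/*
 * mvs_int_rx() - drain the RX completion ring.  The first dword of the
 * ring mirrors the hardware producer index; entries between our consumer
 * index and that value are completed, freed, or flagged for attention.
 */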
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword because of this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The completion entry may arrive late; read the index from the
	 * register and try again.  Note: if interrupt coalescing is enabled,
	 * the register has to be read every time.
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}