mv_sas.c

/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}
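/*
 * Slot-tag helpers: every outstanding command occupies one slot in the
 * delivery queue, tracked by one bit in mvi->tags. Allocation scans for
 * the first clear bit and sets it; freeing simply clears it again.
 */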
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;

			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;

	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}
static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}
static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;

	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(200);
	return rc;
}

void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
				u32 off_lo, u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}
void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
	mvs_prv->scan_finished = 1;
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	if (mvs_prv->scan_finished == 0)
		return 0;

	scsi_flush_work(shost);
	return 1;
}
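/*
 * The three mvs_task_prep_* routines below carve each slot's DMA buffer
 * (MVS_SLOT_BUF_SZ bytes) into the same four regions: command table, open
 * address frame, PRD (scatter/gather) table, and a status buffer that
 * receives the response. The command header in the delivery queue is then
 * pointed at each region.
 */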
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
			qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}
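/*
 * Build the command header and slot buffer for a SATA/STP task: claim a
 * SATA register set for the device, reuse the NCQ tag from the queued ATA
 * command when FPDMA is in use, copy the host-to-device FIS (plus the
 * ATAPI CDB when needed) into the command table, and emit the STP open
 * address frame and PRD entries.
 */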
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("No free register set for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}
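/*
 * Build the command header and slot buffer for an SSP task: queue the
 * delivery entry with the (possibly wide) port phy mask, construct the SSP
 * frame header followed by either a COMMAND IU or, for a TMF, a TASK IU
 * carrying the tag of the task to be managed, then append the open address
 * frame and PRD table.
 */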
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}
#define DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
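/*
 * Common preparation path for every protocol: validate that the port and
 * device are still present, DMA-map the scatterlist for non-ATA tasks,
 * allocate a slot tag and per-slot buffer, then dispatch to the SMP, SSP
 * or ATA prep routine above. On success the TX producer index is advanced;
 * delivery to the hardware is kicked off by the exec callers below.
 */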
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (dev->dev_type != SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;

			mv_dprintk("SATA/STP port %d does not attach "
					"device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;

			mv_dprintk("SAS port %d does not attach "
				"device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf)
		goto err_out_tag;
	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
{
	struct mvs_task_list *first = NULL;

	for (; *num > 0; --*num) {
		struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);

		if (!mvs_list)
			break;

		INIT_LIST_HEAD(&mvs_list->list);
		if (!first)
			first = mvs_list;
		else
			list_add_tail(&mvs_list->list, &first->list);
	}

	return first;
}

static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
{
	LIST_HEAD(list);
	struct list_head *pos, *a;
	struct mvs_task_list *mlist = NULL;

	__list_add(&list, mvs_list->list.prev, &mvs_list->list);

	list_for_each_safe(pos, a, &list) {
		list_del_init(pos);
		mlist = list_entry(pos, struct mvs_task_list, list);
		kmem_cache_free(mvs_task_list_cache, mlist);
	}
}
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_unlock_irq(dev->sata_dev.ap->lock);

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_lock_irq(dev->sata_dev.ap->lock);

	return rc;
}
static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
	struct mvs_info *mvi = NULL;
	struct sas_task *t = task;
	struct mvs_task_list *mvs_list = NULL, *a;
	LIST_HEAD(q);
	int pass[2] = {0};
	u32 rc = 0;
	u32 n = num;
	unsigned long flags = 0;

	mvs_list = mvs_task_alloc_list(&n, gfp_flags);
	if (n) {
		printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
		rc = -ENOMEM;
		goto free_list;
	}

	__list_add(&q, mvs_list->list.prev, &mvs_list->list);

	list_for_each_entry(a, &q, list) {
		a->task = t;
		t = list_entry(t->list.next, struct sas_task, list);
	}

	list_for_each_entry(a, &q, list) {
		t = a->task;
		mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;

		spin_lock_irqsave(&mvi->lock, flags);
		rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
		if (rc)
			dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}

	if (likely(pass[0]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
			(mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	if (likely(pass[1]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
			(mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	list_del_init(&q);

free_list:
	if (mvs_list)
		mvs_task_free_list(mvs_list);

	return rc;
}
int mvs_queue_command(struct sas_task *task, const int num,
			gfp_t gfp_flags)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;

	if (sas->lldd_max_execute_num < 2)
		return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
	else
		return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}
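/*
 * Push the current wide-port phy bitmap to the hardware: every phy that is
 * a member of the port gets the full map written to its PHYR_WIDE_PORT
 * register, while non-member phys have it cleared.
 */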
static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}
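/*
 * Refresh the cached phy state after a link event. For SATA phys the
 * attached device is identified from the D2H signature FIS (re-enabling
 * the signature-FIS interrupt and bailing out if it has not arrived yet);
 * for SAS phys the identify frame received during OOB supplies the device
 * type and protocol. The result is copied into the libsas asd_sas_phy.
 */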
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				mvs_sig_remove_timer(phy);
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;

				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("phy %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("phy %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}
static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL;
	int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;

	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (i >= mvi->chip->n_phy)
		port = &mvi->port[i - mvi->chip->n_phy];
	else
		port = &mvi->port[i];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, dev);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}
struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;

	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == NO_DEVICE) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("max support %d devices, ignore ..\n",
			MVS_MAX_DEVICES);

	return NULL;
}

void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;

	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = NO_DEVICE;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}
int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx "
				"at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}
int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	if (mvi_dev) {
		mv_dprintk("found dev[%d:%x] is gone.\n",
			mvi_dev->device_id, mvi_dev->dev_type);
		mvs_release_task(mvi, dev);
		mvs_free_reg_set(mvi, mvi_dev);
		mvs_free_dev(mvi_dev);
	} else {
		mv_dprintk("found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	mvi_dev->sas_device = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}
static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}

static void mvs_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->completion);
}

#define MVS_TASK_TIMEOUT 20
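/*
 * Issue a task management function as an internal sas_task: the request is
 * retried up to three times, guarded by a 20-second timer that marks the
 * task aborted and completes it if no response arrives. SAM status GOOD
 * maps to TMF_RESP_FUNC_COMPLETE; a data underrun returns the residual
 * byte count instead.
 */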
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->timer.data = (unsigned long) task;
		task->timer.function = mvs_tmf_timedout;
		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			mv_printk("executing internal task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
				    "status 0x%x\n",
				    SAS_ADDR(dev->sas_addr),
				    task->task_status.resp,
				    task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}
static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}

/* Standard mandates link reset for ATA (type 0)
   and hard reset for SSP (type 1), only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_find_local_phy(dev);
	int reset_type = (dev->dev_type == SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	rc = sas_phy_reset(phy, reset_type);
	msleep(2000);
	return rc;
}
/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall-through I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}

int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	else
		mvi_dev->dev_status = MVS_DEV_NORMAL;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}
/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
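/*
 * Abort a single outstanding command. For SSP the slot tag is looked up
 * and an ABORT TASK TMF is sent; if the target completes it, the local
 * slot is completed as well. For a SATA/STP device the task is simply
 * marked aborted and its slot freed, since the SSP TMF path does not
 * apply.
 */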
/* mandatory SAM-3, still need free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
        struct scsi_lun lun;
        struct mvs_tmf_task tmf_task;
        struct domain_device *dev = task->dev;
        struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
        struct mvs_info *mvi;
        int rc = TMF_RESP_FUNC_FAILED;
        unsigned long flags;
        u32 tag;

        if (!mvi_dev) {
                mv_printk("Device has been removed\n");
                return TMF_RESP_FUNC_FAILED;
        }

        mvi = mvi_dev->mvi_info;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                rc = TMF_RESP_FUNC_COMPLETE;
                goto out;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        mvi_dev->dev_status = MVS_DEV_EH;
        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
                struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

                int_to_scsilun(cmnd->device->lun, &lun);
                rc = mvs_find_tag(mvi, task, &tag);
                if (rc == 0) {
                        mv_printk("No such tag in %s\n", __func__);
                        rc = TMF_RESP_FUNC_FAILED;
                        return rc;
                }

                tmf_task.tmf = TMF_ABORT_TASK;
                tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

                rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

                /* if the abort succeeded, complete and free the slot here */
                if (rc == TMF_RESP_FUNC_COMPLETE) {
                        u32 slot_no;
                        struct mvs_slot_info *slot;

                        if (task->lldd_task) {
                                slot = task->lldd_task;
                                slot_no = (u32) (slot - mvi->slot_info);
                                spin_lock_irqsave(&mvi->lock, flags);
                                mvs_slot_complete(mvi, slot_no, 1);
                                spin_unlock_irqrestore(&mvi->lock, flags);
                        }
                }
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                   task->task_proto & SAS_PROTOCOL_STP) {
                if (SATA_DEV == dev->dev_type) {
                        struct mvs_slot_info *slot = task->lldd_task;
                        u32 slot_idx = (u32)(slot - mvi->slot_info);

                        mv_dprintk("mvs_abort_task() mvi=%p task=%p "
                                   "slot=%p slot_idx=0x%x\n",
                                   mvi, task, slot, slot_idx);
                        mvs_tmf_timedout((unsigned long)task);
                        mvs_slot_task_free(mvi, task, slot, slot_idx);
                        rc = TMF_RESP_FUNC_COMPLETE;
                        goto out;
                }
        }
out:
        if (rc != TMF_RESP_FUNC_COMPLETE)
                mv_printk("%s:rc= %d\n", __func__, rc);
        return rc;
}
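
/*
 * Thin wrappers that issue the remaining SSP task management
 * functions (ABORT TASK SET, CLEAR ACA, CLEAR TASK SET) against a
 * given device and LUN via mvs_debug_issue_ssp_tmf().
 */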
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
        int rc = TMF_RESP_FUNC_FAILED;
        struct mvs_tmf_task tmf_task;

        tmf_task.tmf = TMF_ABORT_TASK_SET;
        rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

        return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
        int rc = TMF_RESP_FUNC_FAILED;
        struct mvs_tmf_task tmf_task;

        tmf_task.tmf = TMF_CLEAR_ACA;
        rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

        return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
        int rc = TMF_RESP_FUNC_FAILED;
        struct mvs_tmf_task tmf_task;

        tmf_task.tmf = TMF_CLEAR_TASK_SET;
        rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

        return rc;
}
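
/*
 * mvs_sata_done() - build the libsas ATA response for a completed
 * SATA/STP command by copying the received D2H FIS into the task's
 * response buffer.  An error word carrying CMD_ISS_STPD maps to
 * SAS_OPEN_REJECT; any other error is reported as SAS_PROTO_RESPONSE
 * so the upper layer examines the returned FIS.
 */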
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
                         u32 slot_idx, int err)
{
        struct mvs_device *mvi_dev = task->dev->lldd_dev;
        struct task_status_struct *tstat = &task->task_status;
        struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
        int stat = SAM_STAT_GOOD;

        resp->frame_len = sizeof(struct dev_to_host_fis);
        memcpy(&resp->ending_fis[0],
               SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
               sizeof(struct dev_to_host_fis));
        tstat->buf_valid_size = sizeof(*resp);

        if (unlikely(err)) {
                if (unlikely(err & CMD_ISS_STPD))
                        stat = SAS_OPEN_REJECT;
                else
                        stat = SAS_PROTO_RESPONSE;
        }

        return stat;
}
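
/*
 * mvs_set_sense() - fill a sense buffer with a key/ASC/ASCQ triple,
 * using descriptor format (response code 0x72) when d_sense is set
 * and fixed format (response code 0x70) otherwise.  Buffers that are
 * too short are filled as far as they go and a warning is logged.
 */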
void mvs_set_sense(u8 *buffer, int len, int d_sense,
                   int key, int asc, int ascq)
{
        memset(buffer, 0, len);

        if (d_sense) {
                /* Descriptor format */
                if (len < 4) {
                        mv_printk("Length %d of sense buffer too small to "
                                  "fit sense %x:%x:%x", len, key, asc, ascq);
                }
                buffer[0] = 0x72;               /* Response Code */
                if (len > 1)
                        buffer[1] = key;        /* Sense Key */
                if (len > 2)
                        buffer[2] = asc;        /* ASC */
                if (len > 3)
                        buffer[3] = ascq;       /* ASCQ */
        } else {
                if (len < 14) {
                        mv_printk("Length %d of sense buffer too small to "
                                  "fit sense %x:%x:%x", len, key, asc, ascq);
                }
                buffer[0] = 0x70;               /* Response Code */
                if (len > 2)
                        buffer[2] = key;        /* Sense Key */
                if (len > 7)
                        buffer[7] = 0x0a;       /* Additional Sense Length */
                if (len > 12)
                        buffer[12] = asc;       /* ASC */
                if (len > 13)
                        buffer[13] = ascq;      /* ASCQ */
        }
        return;
}
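
/*
 * mvs_fill_ssp_resp_iu() - fabricate an SSP response IU carrying
 * CHECK CONDITION status with the given fixed-format sense data, so
 * an internally detected error can be reported through the normal
 * sas_ssp_task_response() path.
 */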
void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
                          u8 key, u8 asc, u8 asc_q)
{
        iu->datapres = 2;       /* sense data present */
        iu->response_data_len = 0;
        iu->sense_data_len = 17;
        iu->status = SAM_STAT_CHECK_CONDITION;

        mvs_set_sense(iu->sense_data, 17, 0,
                      key, asc, asc_q);
}
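
/*
 * mvs_slot_err() - decode the error information record of a failed
 * slot and translate it into a libsas completion status.  SSP errors
 * flagging a missing destination are turned into a CHECK CONDITION
 * with NOT READY sense via mvs_fill_ssp_resp_iu(); SATA/STP errors
 * are reported through the D2H FIS built by mvs_sata_done().
 */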
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
                        u32 slot_idx)
{
        struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
        int stat;
        u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
        u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
        u32 tfs = 0;
        enum mvs_port_type type = PORT_TYPE_SAS;

        if (err_dw0 & CMD_ISS_STPD)
                MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

        MVS_CHIP_DISP->command_active(mvi, slot_idx);

        stat = SAM_STAT_CHECK_CONDITION;
        switch (task->task_proto) {
        case SAS_PROTOCOL_SSP:
        {
                stat = SAS_ABORTED_TASK;
                if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
                        struct ssp_response_iu *iu = slot->response +
                                sizeof(struct mvs_err_info);
                        mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
                        sas_ssp_task_response(mvi->dev, task, iu);
                        stat = SAM_STAT_CHECK_CONDITION;
                }
                if (err_dw1 & bit(31))
                        mv_printk("reuse same slot, retry command.\n");
                break;
        }
        case SAS_PROTOCOL_SMP:
                stat = SAM_STAT_CHECK_CONDITION;
                break;
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
        {
                task->ata_task.use_ncq = 0;
                stat = SAS_PROTO_RESPONSE;
                mvs_sata_done(mvi, task, slot_idx, err_dw0);
        }
                break;
        default:
                break;
        }

        return stat;
}
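
/*
 * mvs_slot_complete() - complete the command occupying a slot once
 * its entry shows up in the RX (completion) ring.  Handles aborted
 * tasks, missing devices and error-info records first, then fills in
 * the protocol specific task status (SSP, SMP, SATA/STP), releases
 * the slot and finally calls task->task_done() with mvi->lock
 * dropped.
 */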
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
        u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
        struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
        struct sas_task *task = slot->task;
        struct mvs_device *mvi_dev = NULL;
        struct task_status_struct *tstat;
        struct domain_device *dev;
        u32 aborted;
        void *to;
        enum exec_status sts;

        if (unlikely(!task || !task->lldd_task || !task->dev))
                return -1;

        tstat = &task->task_status;
        dev = task->dev;
        mvi_dev = dev->lldd_dev;

        spin_lock(&task->task_state_lock);
        task->task_state_flags &=
                ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        /* race condition */
        aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
        spin_unlock(&task->task_state_lock);

        memset(tstat, 0, sizeof(*tstat));
        tstat->resp = SAS_TASK_COMPLETE;

        if (unlikely(aborted)) {
                tstat->stat = SAS_ABORTED_TASK;
                if (mvi_dev && mvi_dev->running_req)
                        mvi_dev->running_req--;
                if (sas_protocol_ata(task->task_proto))
                        mvs_free_reg_set(mvi, mvi_dev);

                mvs_slot_task_free(mvi, task, slot, slot_idx);
                return -1;
        }

        /* when no device is attached, complete by the error handling path */
        if (unlikely(!mvi_dev || flags)) {
                if (!mvi_dev)
                        mv_dprintk("port has no device.\n");
                tstat->stat = SAS_PHY_DOWN;
                goto out;
        }

        /* error info record present */
        if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
                mv_dprintk("port %d slot %d rx_desc %X has error info "
                           "%016llX.\n", slot->port->sas_port.id, slot_idx,
                           rx_desc, (u64)(*(u64 *)slot->response));
                tstat->stat = mvs_slot_err(mvi, task, slot_idx);
                tstat->resp = SAS_TASK_COMPLETE;
                goto out;
        }

        switch (task->task_proto) {
        case SAS_PROTOCOL_SSP:
                /* hw says status == 0, datapres == 0 */
                if (rx_desc & RXQ_GOOD) {
                        tstat->stat = SAM_STAT_GOOD;
                        tstat->resp = SAS_TASK_COMPLETE;
                }
                /* response frame present */
                else if (rx_desc & RXQ_RSP) {
                        struct ssp_response_iu *iu = slot->response +
                                sizeof(struct mvs_err_info);
                        sas_ssp_task_response(mvi->dev, task, iu);
                } else
                        tstat->stat = SAM_STAT_CHECK_CONDITION;
                break;

        case SAS_PROTOCOL_SMP: {
                struct scatterlist *sg_resp = &task->smp_task.smp_resp;
                tstat->stat = SAM_STAT_GOOD;
                to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
                memcpy(to + sg_resp->offset,
                       slot->response + sizeof(struct mvs_err_info),
                       sg_dma_len(sg_resp));
                kunmap_atomic(to, KM_IRQ0);
                break;
        }

        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
                tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
                break;
        }

        default:
                tstat->stat = SAM_STAT_CHECK_CONDITION;
                break;
        }
        if (!slot->port->port_attached) {
                mv_dprintk("port %d has been removed.\n",
                           slot->port->sas_port.id);
                tstat->stat = SAS_PHY_DOWN;
        }

out:
        if (mvi_dev && mvi_dev->running_req) {
                mvi_dev->running_req--;
                if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
                        mvs_free_reg_set(mvi, mvi_dev);
        }
        mvs_slot_task_free(mvi, task, slot, slot_idx);
        sts = tstat->stat;

        spin_unlock(&mvi->lock);
        if (task->task_done)
                task->task_done(task);
        spin_lock(&mvi->lock);

        return sts;
}
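
/*
 * mvs_do_release_task() - force-complete every slot still queued on
 * the port behind a phy, optionally restricted to one domain device.
 * The completion queue is drained first so requests that already
 * finished are not completed twice.
 */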
void mvs_do_release_task(struct mvs_info *mvi,
                         int phy_no, struct domain_device *dev)
{
        u32 slot_idx;
        struct mvs_phy *phy;
        struct mvs_port *port;
        struct mvs_slot_info *slot, *slot2;

        phy = &mvi->phy[phy_no];
        port = phy->port;
        if (!port)
                return;
        /* clean cmpl queue in case request is already finished */
        mvs_int_rx(mvi, false);

        list_for_each_entry_safe(slot, slot2, &port->list, entry) {
                struct sas_task *task;

                slot_idx = (u32) (slot - mvi->slot_info);
                task = slot->task;

                if (dev && task->dev != dev)
                        continue;

                mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
                          slot_idx, slot->slot_tag, task);
                MVS_CHIP_DISP->command_active(mvi, slot_idx);
                mvs_slot_complete(mvi, slot_idx, 1);
        }
}
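
/*
 * mvs_release_task() - release outstanding tasks for a device on
 * every phy the device is reachable through.
 */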
void mvs_release_task(struct mvs_info *mvi,
                      struct domain_device *dev)
{
        int i, phyno[WIDE_PORT_MAX_PHY], num;

        num = mvs_find_dev_phyno(dev, phyno);
        for (i = 0; i < num; i++)
                mvs_do_release_task(mvi, phyno[i], dev);
}
static void mvs_phy_disconnected(struct mvs_phy *phy)
{
        phy->phy_attached = 0;
        phy->att_dev_info = 0;
        phy->att_dev_sas_addr = 0;
}
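
/*
 * mvs_work_queue() - delayed-work handler for phy plug and expander
 * broadcast events queued by mvs_handle_event().  Plug-out events are
 * re-checked against the phy control register: if the phy is still
 * down a loss of signal is reported to libsas, otherwise the phy is
 * rediscovered as a hot plug.  Broadcast-change events are forwarded
 * as PORTE_BROADCAST_RCVD.
 */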
static void mvs_work_queue(struct work_struct *work)
{
        struct delayed_work *dw = container_of(work, struct delayed_work, work);
        struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
        struct mvs_info *mvi = mwq->mvi;
        unsigned long flags;
        u32 phy_no = (unsigned long) mwq->data;
        struct sas_ha_struct *sas_ha = mvi->sas;
        struct mvs_phy *phy = &mvi->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        spin_lock_irqsave(&mvi->lock, flags);
        if (mwq->handler & PHY_PLUG_EVENT) {
                if (phy->phy_event & PHY_PLUG_OUT) {
                        u32 tmp;
                        struct sas_identify_frame *id;

                        id = (struct sas_identify_frame *)phy->frame_rcvd;
                        tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
                        phy->phy_event &= ~PHY_PLUG_OUT;
                        if (!(tmp & PHY_READY_MASK)) {
                                sas_phy_disconnected(sas_phy);
                                mvs_phy_disconnected(phy);
                                sas_ha->notify_phy_event(sas_phy,
                                                PHYE_LOSS_OF_SIGNAL);
                                mv_dprintk("phy%d Removed Device\n", phy_no);
                        } else {
                                MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
                                mvs_update_phyinfo(mvi, phy_no, 1);
                                mvs_bytes_dmaed(mvi, phy_no);
                                mvs_port_notify_formed(sas_phy, 0);
                                mv_dprintk("phy%d Attached Device\n", phy_no);
                        }
                }
        } else if (mwq->handler & EXP_BRCT_CHG) {
                phy->phy_event &= ~EXP_BRCT_CHG;
                sas_ha->notify_port_event(sas_phy,
                                PORTE_BROADCAST_RCVD);
                mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
        }
        list_del(&mwq->entry);
        spin_unlock_irqrestore(&mvi->lock, flags);
        kfree(mwq);
}
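
/*
 * mvs_handle_event() - queue a phy event for deferred handling by
 * mvs_work_queue(), roughly two seconds later.  Returns -ENOMEM if
 * the work item cannot be allocated.
 */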
static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
        struct mvs_wq *mwq;
        int ret = 0;

        mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
        if (mwq) {
                mwq->mvi = mvi;
                mwq->data = data;
                mwq->handler = handler;
                MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
                list_add_tail(&mwq->entry, &mvi->wq_list);
                schedule_delayed_work(&mwq->work_q, HZ * 2);
        } else
                ret = -ENOMEM;

        return ret;
}
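
/*
 * mvs_sig_time_out() - timer callback fired when a SATA signature FIS
 * has not arrived in time; hard-resets the phy that armed the timer.
 */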
static void mvs_sig_time_out(unsigned long tphy)
{
        struct mvs_phy *phy = (struct mvs_phy *)tphy;
        struct mvs_info *mvi = phy->mvi;
        u8 phy_no;

        for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
                if (&mvi->phy[phy_no] == phy) {
                        mv_dprintk("Get signature time out, reset phy %d\n",
                                   phy_no + mvi->id * mvi->chip->n_phy);
                        MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
                }
        }
}
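
/*
 * mvs_int_port() - per-phy interrupt handler.  Reads and acknowledges
 * the port interrupt status, then deals with STP decoding errors, phy
 * loss (PHYEV_POOF), COMWAKE (arming the signature-FIS timeout
 * timer), signature-FIS/identify completion (bringing the phy up) and
 * expander broadcast changes.
 */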
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
        u32 tmp;
        struct mvs_phy *phy = &mvi->phy[phy_no];

        phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
        MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
        mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
                   MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
        mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
                   phy->irq_status);

        /*
         * 'events' carries the port-level event; check the interrupt
         * status that belongs to this particular phy here.
         */
        if (phy->irq_status & PHYEV_DCDR_ERR) {
                mv_dprintk("phy %d STP decoding error.\n",
                           phy_no + mvi->id*mvi->chip->n_phy);
        }

        if (phy->irq_status & PHYEV_POOF) {
                mdelay(500);
                if (!(phy->phy_event & PHY_PLUG_OUT)) {
                        int dev_sata = phy->phy_type & PORT_TYPE_SATA;
                        int ready;
                        mvs_do_release_task(mvi, phy_no, NULL);
                        phy->phy_event |= PHY_PLUG_OUT;
                        MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
                        mvs_handle_event(mvi,
                                (void *)(unsigned long)phy_no,
                                PHY_PLUG_EVENT);
                        ready = mvs_is_phy_ready(mvi, phy_no);
                        if (ready || dev_sata) {
                                if (MVS_CHIP_DISP->stp_reset)
                                        MVS_CHIP_DISP->stp_reset(mvi,
                                                        phy_no);
                                else
                                        MVS_CHIP_DISP->phy_reset(mvi,
                                                        phy_no, MVS_SOFT_RESET);
                                return;
                        }
                }
        }

        if (phy->irq_status & PHYEV_COMWAKE) {
                tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
                MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
                                tmp | PHYEV_SIG_FIS);
                if (phy->timer.function == NULL) {
                        phy->timer.data = (unsigned long)phy;
                        phy->timer.function = mvs_sig_time_out;
                        phy->timer.expires = jiffies + 5*HZ;
                        add_timer(&phy->timer);
                }
        }
        if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
                phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
                mv_dprintk("notify plug in on phy[%d]\n", phy_no);
                if (phy->phy_status) {
                        mdelay(10);
                        MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
                        if (phy->phy_type & PORT_TYPE_SATA) {
                                tmp = MVS_CHIP_DISP->read_port_irq_mask(
                                                mvi, phy_no);
                                tmp &= ~PHYEV_SIG_FIS;
                                MVS_CHIP_DISP->write_port_irq_mask(mvi,
                                                phy_no, tmp);
                        }
                        mvs_update_phyinfo(mvi, phy_no, 0);
                        if (phy->phy_type & PORT_TYPE_SAS) {
                                MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
                                mdelay(10);
                        }

                        mvs_bytes_dmaed(mvi, phy_no);
                        /* whether driver is going to handle hot plug */
                        if (phy->phy_event & PHY_PLUG_OUT) {
                                mvs_port_notify_formed(&phy->sas_phy, 0);
                                phy->phy_event &= ~PHY_PLUG_OUT;
                        }
                } else {
                        mv_dprintk("plugin interrupt but phy%d is gone\n",
                                   phy_no + mvi->id*mvi->chip->n_phy);
                }
        } else if (phy->irq_status & PHYEV_BROAD_CH) {
                mv_dprintk("phy %d broadcast change.\n",
                           phy_no + mvi->id*mvi->chip->n_phy);
                mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
                                EXP_BRCT_CHG);
        }
}
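
/*
 * mvs_int_rx() - walk the RX completion ring from the cached consumer
 * index up to the hardware producer index (mirrored in the first
 * dword of the ring) and dispatch each descriptor: completed or
 * errored slots go to mvs_slot_complete(), reset slots are freed, and
 * attention entries optionally trigger MVS_CHIP_DISP->int_full().
 */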
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
        u32 rx_prod_idx, rx_desc;
        bool attn = false;

        /* the first dword in the RX ring is special: it contains
         * a mirror of the hardware's RX producer index, so that
         * we don't have to stall the CPU reading that register.
         * The actual RX ring is offset by one dword, due to this.
         */
        rx_prod_idx = mvi->rx_cons;
        mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
        if (mvi->rx_cons == 0xfff)      /* h/w hasn't touched RX ring yet */
                return 0;

        /* The completion entry may arrive late; read the producer index
         * from the register and try again.
         * Note: if interrupt coalescing is enabled, the register has to
         * be read every time.
         */
        if (unlikely(mvi->rx_cons == rx_prod_idx))
                mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

        if (mvi->rx_cons == rx_prod_idx)
                return 0;

        while (mvi->rx_cons != rx_prod_idx) {
                /* increment our internal RX consumer pointer */
                rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
                rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

                if (likely(rx_desc & RXQ_DONE))
                        mvs_slot_complete(mvi, rx_desc, 0);
                if (rx_desc & RXQ_ATTN) {
                        attn = true;
                } else if (rx_desc & RXQ_ERR) {
                        if (!(rx_desc & RXQ_DONE))
                                mvs_slot_complete(mvi, rx_desc, 0);
                } else if (rx_desc & RXQ_SLOT_RESET) {
                        mvs_slot_free(mvi, rx_desc);
                }
        }

        if (attn && self_clear)
                MVS_CHIP_DISP->int_full(mvi);
        return 0;
}