/*
 * linux/drivers/scsi/esas2r/esas2r_disc.c
 *      esas2r device discovery routines
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "esas2r.h"

/* Miscellaneous internal discovery routines */
static void esas2r_disc_abort(struct esas2r_adapter *a,
                              struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
                                 struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);

/* Internal discovery routines that process the states */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
                                       struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
                                struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
                                   struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
                                  struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
                                     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
                                         struct esas2r_request *rq);
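
/*
 * Prepare the adapter for a fresh discovery pass: clear the in-progress
 * and scan flags, record the start time, and derive the device wait
 * time/count from NVRAM.  After a chip reset or power management event,
 * the previously discovered device count overrides the configured wait
 * count since the exact number of devices to expect is already known.
 */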
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
        struct esas2r_sas_nvram *nvr = a->nvram;

        esas2r_trace_enter();

        esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
        esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
        esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);

        a->disc_start_time = jiffies_to_msecs(jiffies);
        a->disc_wait_time = nvr->dev_wait_time * 1000;
        a->disc_wait_cnt = nvr->dev_wait_count;

        if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
                a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

        /*
         * If we are doing chip reset or power management processing, always
         * wait for devices.  use the NVRAM device count if it is greater than
         * previously discovered devices.
         */

        esas2r_hdebug("starting discovery...");

        a->general_req.interrupt_cx = NULL;

        if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
                if (a->prev_dev_cnt == 0) {
                        /* Don't bother waiting if there is nothing to wait
                         * for.
                         */
                        a->disc_wait_time = 0;
                } else {
                        /*
                         * Set the device wait count to what was previously
                         * found.  We don't care if the user only configured
                         * a time because we know the exact count to wait for.
                         * There is no need to honor the user's wishes to
                         * always wait the full time.
                         */
                        a->disc_wait_cnt = a->prev_dev_cnt;

                        /*
                         * bump the minimum wait time to 15 seconds since the
                         * default is 3 (system boot or the boot driver usually
                         * buys us more time).
                         */
                        if (a->disc_wait_time < 15000)
                                a->disc_wait_time = 15000;
                }
        }

        esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
        esas2r_trace("disc wait time: %d", a->disc_wait_time);

        if (a->disc_wait_time == 0)
                esas2r_disc_check_complete(a);

        esas2r_trace_exit();
}
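
/*
 * Start servicing a queued discovery event, if one is pending.  Taken
 * under the memory lock because discovery events can be queued from
 * interrupt handling.
 */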
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
        unsigned long flags;

        spin_lock_irqsave(&a->mem_lock, flags);

        if (a->disc_ctx.disc_evt)
                esas2r_disc_start_port(a);

        spin_unlock_irqrestore(&a->mem_lock, flags);
}
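
/*
 * Drive discovery from a polled (non-interrupt) context: service any
 * pending interrupts, start a queued discovery event, and wait for the
 * current discovery request to finish, resetting the adapter if it
 * times out.
 */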
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
        struct esas2r_request *rq = &a->general_req;

        /* service any pending interrupts first */

        esas2r_polled_interrupt(a);

        /*
         * now, interrupt processing may have queued up a discovery event.  go
         * see if we have one to start.  we couldn't start it in the ISR since
         * polled discovery would cause a deadlock.
         */
        esas2r_disc_start_waiting(a);

        if (rq->interrupt_cx == NULL)
                return;

        if (rq->req_stat == RS_STARTED
            && rq->timeout <= RQ_MAX_TIMEOUT) {
                /* wait for the current discovery request to complete. */
                esas2r_wait_request(a, rq);

                if (rq->req_stat == RS_TIMEOUT) {
                        esas2r_disc_abort(a, rq);
                        esas2r_local_reset_adapter(a);
                        return;
                }
        }

        if (rq->req_stat == RS_PENDING
            || rq->req_stat == RS_STARTED)
                return;

        esas2r_disc_continue(a, rq);
}
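
/*
 * Determine whether the device wait period is over.  While waiting, a
 * device scan is scheduled every few seconds; once the wait time is
 * exhausted or the expected device count is reached, waiting stops and
 * the deferred target state changes are finally reported.
 */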
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
        unsigned long flags;

        esas2r_trace_enter();

        /* check to see if we should be waiting for devices */
        if (a->disc_wait_time) {
                u32 currtime = jiffies_to_msecs(jiffies);
                u32 time = currtime - a->disc_start_time;

                /*
                 * Wait until the device wait time is exhausted or the device
                 * wait count is satisfied.
                 */
                if (time < a->disc_wait_time
                    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
                        || a->disc_wait_cnt == 0)) {
                        /* After three seconds of waiting, schedule a scan. */
                        if (time >= 3000
                            && !(esas2r_lock_set_flags(&a->flags2,
                                                       AF2_DEV_SCAN) &
                                 ilog2(AF2_DEV_SCAN))) {
                                spin_lock_irqsave(&a->mem_lock, flags);
                                esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                                spin_unlock_irqrestore(&a->mem_lock, flags);
                        }

                        esas2r_trace_exit();
                        return;
                }

                /*
                 * We are done waiting...we think.  Adjust the wait time to
                 * consume events after the count is met.
                 */
                if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
                      & ilog2(AF2_DEV_CNT_OK)))
                        a->disc_wait_time = time + 3000;

                /* If we haven't done a full scan yet, do it now. */
                if (!(esas2r_lock_set_flags(&a->flags2,
                                            AF2_DEV_SCAN) &
                      ilog2(AF2_DEV_SCAN))) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);

                        esas2r_trace_exit();
                        return;
                }

                /*
                 * Now, if there is still time left to consume events, continue
                 * waiting.
                 */
                if (time < a->disc_wait_time) {
                        esas2r_trace_exit();
                        return;
                }
        } else {
                if (!(esas2r_lock_set_flags(&a->flags2,
                                            AF2_DEV_SCAN) &
                      ilog2(AF2_DEV_SCAN))) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
                }
        }

        /* We want to stop waiting for devices. */
        a->disc_wait_time = 0;

        if ((a->flags & AF_DISC_POLLED)
            && (a->flags & AF_DISC_IN_PROG)) {
                /*
                 * Polled discovery is still pending so continue the active
                 * discovery until it is done.  At that point, we will stop
                 * polled discovery and transition to interrupt driven
                 * discovery.
                 */
        } else {
                /*
                 * Done waiting for devices.  Note that we get here immediately
                 * after deferred waiting completes because that is interrupt
                 * driven; i.e. There is no transition.
                 */
                esas2r_disc_fix_curr_requests(a);
                esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);

                /*
                 * We have deferred target state changes until now because we
                 * don't want to report any removals (due to the first arrival)
                 * until the device wait time expires.
                 */
                esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
        }

        esas2r_trace_exit();
}
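
/*
 * Record a discovery event in the discovery context and start
 * processing it unless a chip reset is pending or polled discovery is
 * active (starting it from the ISR could deadlock).
 */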
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
        struct esas2r_disc_context *dc = &a->disc_ctx;

        esas2r_trace_enter();

        esas2r_trace("disc_event: %d", disc_evt);

        /* Initialize the discovery context */
        dc->disc_evt |= disc_evt;

        /*
         * Don't start discovery before or during polled discovery.  if we did,
         * we would have a deadlock if we are in the ISR already.
         */
        if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
                esas2r_disc_start_port(a);

        esas2r_trace_exit();
}
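
/*
 * Begin servicing the next queued discovery event.  Returns false when
 * discovery is already in progress, no event is pending, or polled
 * discovery is being stopped in favor of interrupt driven discovery.
 */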
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
        struct esas2r_request *rq = &a->general_req;
        struct esas2r_disc_context *dc = &a->disc_ctx;
        bool ret;

        esas2r_trace_enter();

        if (a->flags & AF_DISC_IN_PROG) {
                esas2r_trace_exit();

                return false;
        }

        /* If there is a discovery waiting, process it. */
        if (dc->disc_evt) {
                if ((a->flags & AF_DISC_POLLED)
                    && a->disc_wait_time == 0) {
                        /*
                         * We are doing polled discovery, but we no longer want
                         * to wait for devices.  Stop polled discovery and
                         * transition to interrupt driven discovery.
                         */
                        esas2r_trace_exit();

                        return false;
                }
        } else {
                /* Discovery is complete. */

                esas2r_hdebug("disc done");

                esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);

                esas2r_trace_exit();

                return false;
        }

        /* Handle the discovery context */
        esas2r_trace("disc_evt: %d", dc->disc_evt);
        esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
        dc->flags = 0;

        if (a->flags & AF_DISC_POLLED)
                dc->flags |= DCF_POLLED;

        rq->interrupt_cx = dc;
        rq->req_stat = RS_SUCCESS;

        /* Decode the event code */
        if (dc->disc_evt & DCDE_DEV_SCAN) {
                dc->disc_evt &= ~DCDE_DEV_SCAN;

                dc->flags |= DCF_DEV_SCAN;
                dc->state = DCS_BLOCK_DEV_SCAN;
        } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
                dc->disc_evt &= ~DCDE_DEV_CHANGE;

                dc->flags |= DCF_DEV_CHANGE;
                dc->state = DCS_DEV_RMV;
        }

        /* Continue interrupt driven discovery */
        if (!(a->flags & AF_DISC_POLLED))
                ret = esas2r_disc_continue(a, rq);
        else
                ret = true;

        esas2r_trace_exit();

        return ret;
}
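
/*
 * The discovery state machine.  Runs state handlers until one of them
 * issues a firmware request (and returns true) or discovery finishes,
 * at which point the next queued event, if any, is started.
 */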
static bool esas2r_disc_continue(struct esas2r_adapter *a,
                                 struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;

        /* Device discovery/removal */
        while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
                rslt = false;

                switch (dc->state) {
                case DCS_DEV_RMV:
                        rslt = esas2r_disc_dev_remove(a, rq);
                        break;

                case DCS_DEV_ADD:
                        rslt = esas2r_disc_dev_add(a, rq);
                        break;

                case DCS_BLOCK_DEV_SCAN:
                        rslt = esas2r_disc_block_dev_scan(a, rq);
                        break;

                case DCS_RAID_GRP_INFO:
                        rslt = esas2r_disc_raid_grp_info(a, rq);
                        break;

                case DCS_PART_INFO:
                        rslt = esas2r_disc_part_info(a, rq);
                        break;

                case DCS_PT_DEV_INFO:
                        rslt = esas2r_disc_passthru_dev_info(a, rq);
                        break;

                case DCS_PT_DEV_ADDR:
                        rslt = esas2r_disc_passthru_dev_addr(a, rq);
                        break;

                case DCS_DISC_DONE:
                        dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
                        break;

                default:
                        esas2r_bugon();
                        dc->state = DCS_DISC_DONE;
                        break;
                }

                if (rslt)
                        return true;
        }

        /* Discovery is done...for now. */
        rq->interrupt_cx = NULL;

        if (!(a->flags & AF_DISC_PENDING))
                esas2r_disc_fix_curr_requests(a);

        esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);

        /* Start the next discovery. */
        return esas2r_disc_start_port(a);
}
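
/*
 * Common submission path for discovery requests: enforce a minimum
 * timeout, tag the request as a discovery request, and either start it
 * immediately or defer it while a chip reset or flash operation is in
 * progress.
 */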
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        unsigned long flags;

        /* Set the timeout to a minimum value. */
        if (rq->timeout < ESAS2R_DEFAULT_TMO)
                rq->timeout = ESAS2R_DEFAULT_TMO;

        /*
         * Override the request type to distinguish discovery requests.  If we
         * end up deferring the request, esas2r_disc_local_start_request()
         * will be called to restart it.
         */
        rq->req_type = RT_DISC_REQ;

        spin_lock_irqsave(&a->queue_lock, flags);

        if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
                esas2r_disc_local_start_request(a, rq);
        else
                list_add_tail(&rq->req_list, &a->defer_list);

        spin_unlock_irqrestore(&a->queue_lock, flags);

        return true;
}

void esas2r_disc_local_start_request(struct esas2r_adapter *a,
                                     struct esas2r_request *rq)
{
        esas2r_trace_enter();

        list_add_tail(&rq->req_list, &a->active_list);

        esas2r_start_vda_request(a, rq);

        esas2r_trace_exit();
}

static void esas2r_disc_abort(struct esas2r_adapter *a,
                              struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;

        esas2r_trace_enter();

        /* abort the current discovery */
        dc->state = DCS_DISC_DONE;

        esas2r_trace_exit();
}
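
/*
 * DCS_BLOCK_DEV_SCAN: ask the firmware to scan for block devices.  The
 * completion callback latches the scan generation and advances to RAID
 * group enumeration.
 */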
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
                                       struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;

        esas2r_trace_enter();

        esas2r_rq_init_request(rq, a);

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_DEV_SCAN,
                             0,
                             0,
                             0,
                             NULL);

        rq->comp_cb = esas2r_disc_block_dev_scan_cb;

        rq->timeout = 30000;
        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SUCCESS)
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;

        dc->state = DCS_RAID_GRP_INFO;
        dc->raid_grp_ix = 0;

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
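
/*
 * DCS_RAID_GRP_INFO: fetch information for the RAID group at
 * dc->raid_grp_ix.  Online and degraded groups proceed to partition
 * enumeration; anything else is skipped.
 */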
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vda_grp_info *grpinfo;

        esas2r_trace_enter();

        esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);

        if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
                dc->state = DCS_DISC_DONE;

                esas2r_trace_exit();

                return false;
        }

        esas2r_rq_init_request(rq, a);

        grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

        memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_GRP_INFO,
                             dc->scan_gen,
                             0,
                             sizeof(struct atto_vda_grp_info),
                             NULL);

        grpinfo->grp_index = dc->raid_grp_ix;

        rq->comp_cb = esas2r_disc_raid_grp_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
                                         struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vda_grp_info *grpinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->raid_grp_ix = 0;
                goto done;
        }

        if (rq->req_stat == RS_SUCCESS) {
                grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

                if (grpinfo->status != VDA_GRP_STAT_ONLINE
                    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
                        /* go to the next group. */
                        dc->raid_grp_ix++;
                } else {
                        memcpy(&dc->raid_grp_name[0],
                               &grpinfo->grp_name[0],
                               sizeof(grpinfo->grp_name));

                        dc->interleave = le32_to_cpu(grpinfo->interleave);
                        dc->block_size = le32_to_cpu(grpinfo->block_size);

                        dc->state = DCS_PART_INFO;
                        dc->part_num = 0;
                }
        } else {
                if (rq->req_stat != RS_GRP_INVALID) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for RAID group info failed - "
                                   "returned with %x",
                                   rq->req_stat);
                }

                dc->dev_ix = 0;
                dc->state = DCS_PT_DEV_INFO;
        }

done:
        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
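
/*
 * DCS_PART_INFO: fetch the next partition of the current RAID group and
 * add it to the target database as a RAID target.
 */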
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
                                  struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vdapart_info *partinfo;

        esas2r_trace_enter();

        esas2r_trace("part_num: %d", dc->part_num);

        if (dc->part_num >= VDA_MAX_PARTITIONS) {
                dc->state = DCS_RAID_GRP_INFO;
                dc->raid_grp_ix++;

                esas2r_trace_exit();

                return false;
        }

        esas2r_rq_init_request(rq, a);

        partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

        memset(partinfo, 0, sizeof(struct atto_vdapart_info));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_PART_INFO,
                             dc->scan_gen,
                             0,
                             sizeof(struct atto_vdapart_info),
                             NULL);

        partinfo->part_no = dc->part_num;

        memcpy(&partinfo->grp_name[0],
               &dc->raid_grp_name[0],
               sizeof(partinfo->grp_name));

        rq->comp_cb = esas2r_disc_part_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
                                     struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vdapart_info *partinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->raid_grp_ix = 0;
                dc->state = DCS_RAID_GRP_INFO;
        } else if (rq->req_stat == RS_SUCCESS) {
                partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

                dc->part_num = partinfo->part_no;

                dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

                esas2r_targ_db_add_raid(a, dc);

                dc->part_num++;
        } else {
                if (rq->req_stat != RS_PART_LAST) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for RAID group partition info "
                                   "failed - status:%d", rq->req_stat);
                }

                dc->state = DCS_RAID_GRP_INFO;
                dc->raid_grp_ix++;
        }

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
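
/*
 * DCS_PT_DEV_INFO: fetch information for the pass through device at
 * dc->dev_ix.  Devices that report a physical target ID advance to
 * address lookup (DCS_PT_DEV_ADDR).
 */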
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vda_devinfo *devinfo;

        esas2r_trace_enter();

        esas2r_trace("dev_ix: %d", dc->dev_ix);

        esas2r_rq_init_request(rq, a);

        devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

        memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_DEV_PT_INFO,
                             dc->scan_gen,
                             dc->dev_ix,
                             sizeof(struct atto_vda_devinfo),
                             NULL);

        rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vda_devinfo *devinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->dev_ix = 0;
                dc->state = DCS_PT_DEV_INFO;
        } else if (rq->req_stat == RS_SUCCESS) {
                devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

                dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

                dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

                if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
                        dc->curr_phys_id =
                                le16_to_cpu(devinfo->phys_target_id);
                        dc->dev_addr_type = ATTO_GDA_AT_PORT;
                        dc->state = DCS_PT_DEV_ADDR;

                        esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
                        esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
                } else {
                        dc->dev_ix++;
                }
        } else {
                if (rq->req_stat != RS_DEV_INVALID) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for device information failed - "
                                   "status:%d", rq->req_stat);
                }

                dc->state = DCS_DISC_DONE;
        }

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
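
/*
 * DCS_PT_DEV_ADDR: issue a tunneled ATTO_FUNC_GET_DEV_ADDR IOCTL to
 * retrieve first the SAS address and then the unique identifier of the
 * current pass through device; the callback adds the target once both
 * are known.
 */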
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_ioctl *hi;
        struct esas2r_sg_context sgc;

        esas2r_trace_enter();

        esas2r_rq_init_request(rq, a);

        /* format the request. */

        sgc.cur_offset = NULL;
        sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
        sgc.length = offsetof(struct atto_ioctl, data)
                     + sizeof(struct atto_hba_get_device_address);

        esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

        esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

        if (!esas2r_build_sg_list(a, rq, &sgc)) {
                esas2r_rq_destroy_request(rq, a);

                esas2r_trace_exit();

                return false;
        }

        rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

        rq->interrupt_cx = dc;

        /* format the IOCTL data. */

        hi = (struct atto_ioctl *)a->disc_buffer;

        memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

        hi->version = ATTO_VER_GET_DEV_ADDR0;
        hi->function = ATTO_FUNC_GET_DEV_ADDR;
        hi->flags = HBAF_TUNNEL;

        hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
        hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

        /* start it up. */

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t = NULL;
        unsigned long flags;
        struct atto_ioctl *hi;
        u16 addrlen;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        hi = (struct atto_ioctl *)a->disc_buffer;

        if (rq->req_stat == RS_SUCCESS
            && hi->status == ATTO_STS_SUCCESS) {
                addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

                if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
                        if (addrlen == sizeof(u64))
                                memcpy(&dc->sas_addr,
                                       &hi->data.get_dev_addr.address[0],
                                       addrlen);
                        else
                                memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

                        /* Get the unique identifier. */
                        dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

                        goto next_dev_addr;
                } else {
                        /* Add the pass through target. */
                        if (HIBYTE(addrlen) == 0) {
                                t = esas2r_targ_db_add_pthru(
                                        a, dc,
                                        &hi->data.get_dev_addr.address[0],
                                        (u8)hi->data.get_dev_addr.addr_len);

                                if (t)
                                        memcpy(&t->sas_addr, &dc->sas_addr,
                                               sizeof(t->sas_addr));
                        } else {
                                /* getting the back end data failed */

                                esas2r_log(ESAS2R_LOG_WARN,
                                           "an error occurred retrieving the "
                                           "back end data (%s:%d)",
                                           __func__,
                                           __LINE__);
                        }
                }
        } else {
                /* getting the back end data failed */

                esas2r_log(ESAS2R_LOG_WARN,
                           "an error occurred retrieving the back end data - "
                           "rq->req_stat:%d hi->status:%d",
                           rq->req_stat, hi->status);
        }

        /* proceed to the next device. */

        if (dc->flags & DCF_DEV_SCAN) {
                dc->dev_ix++;
                dc->state = DCS_PT_DEV_INFO;
        } else if (dc->flags & DCF_DEV_CHANGE) {
                dc->curr_targ++;
                dc->state = DCS_DEV_ADD;
        } else {
                esas2r_bugon();
        }

next_dev_addr:
        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
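
/*
 * SG list callback for the discovery IOCTL; the data always lands in
 * the adapter's uncached discovery buffer.
 */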
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
        struct esas2r_adapter *a = sgc->adapter;

        if (sgc->length > ESAS2R_DISC_BUF_LEN)
                esas2r_bugon();

        *addr = a->uncached_phys
                + (u64)((u8 *)a->disc_buffer - a->uncached);

        return sgc->length;
}
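
/*
 * DCS_DEV_RMV: walk the target database and remove targets that are no
 * longer present, then move on to arrival processing (DCS_DEV_ADD).
 */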
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
                                   struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t;
        struct esas2r_target *t2;

        esas2r_trace_enter();

        /* process removals. */
        for (t = a->targetdb; t < a->targetdb_end; t++) {
                if (t->new_target_state != TS_NOT_PRESENT)
                        continue;

                t->new_target_state = TS_INVALID;

                /* remove the right target! */
                t2 = esas2r_targ_db_find_by_virt_id(a,
                                                    esas2r_targ_get_id(t, a));

                if (t2)
                        esas2r_targ_db_remove(a, t2);
        }

        /* removals complete.  process arrivals. */
        dc->state = DCS_DEV_ADD;
        dc->curr_targ = a->targetdb;

        esas2r_trace_exit();

        return false;
}
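
/*
 * DCS_DEV_ADD: process one arrival per invocation, using the LU event
 * saved with the target to decide between adding a RAID partition and
 * a pass through device.
 */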
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
                                struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t = dc->curr_targ;

        if (t >= a->targetdb_end) {
                /* done processing state changes. */
                dc->state = DCS_DISC_DONE;
        } else if (t->new_target_state == TS_PRESENT) {
                struct atto_vda_ae_lu *luevt = &t->lu_event;

                esas2r_trace_enter();

                /* clear this now in case more events come in. */
                t->new_target_state = TS_INVALID;

                /* setup the discovery context for adding this device. */
                dc->curr_virt_id = esas2r_targ_get_id(t, a);

                if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
                     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
                    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
                        dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
                        dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
                } else {
                        dc->block_size = 0;
                        dc->interleave = 0;
                }

                /* determine the device type being added. */
                if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
                        if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
                                dc->state = DCS_PT_DEV_ADDR;
                                dc->dev_addr_type = ATTO_GDA_AT_PORT;
                                dc->curr_phys_id = luevt->wphys_target_id;
                        } else {
                                esas2r_log(ESAS2R_LOG_WARN,
                                           "luevt->dwevent does not have the "
                                           "VDAAE_LU_PHYS_ID bit set (%s:%d)",
                                           __func__, __LINE__);
                        }
                } else {
                        dc->raid_grp_name[0] = 0;

                        esas2r_targ_db_add_raid(a, dc);
                }

                esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
                esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
                esas2r_trace("dwevent: %d", luevt->dwevent);

                esas2r_trace_exit();
        }

        if (dc->state == DCS_DEV_ADD) {
                /* go to the next device. */
                dc->curr_targ++;
        }

        return false;
}

/*
 * When discovery is done, find all requests on defer queue and
 * test if they need to be modified.  If a target is no longer present
 * then complete the request with RS_SEL.  Otherwise, update the
 * target_id since after a hibernate it can be a different value.
 * VDA does not make passthrough target IDs persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
        unsigned long flags;
        struct esas2r_target *t;
        struct esas2r_request *rq;
        struct list_head *element;

        /* update virt_targ_id in any outstanding esas2r_requests */

        spin_lock_irqsave(&a->queue_lock, flags);

        list_for_each(element, &a->defer_list) {
                rq = list_entry(element, struct esas2r_request, req_list);
                if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
                        t = a->targetdb + rq->target_id;

                        if (t->target_state == TS_PRESENT)
                                rq->vrq->scsi.target_id = le16_to_cpu(
                                        t->virt_targ_id);
                        else
                                rq->req_stat = RS_SEL;
                }
        }

        spin_unlock_irqrestore(&a->queue_lock, flags);
}