zfcp_fsf.c 73 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629
  1. /*
  2. * zfcp device driver
  3. *
  4. * Implementation of FSF commands.
  5. *
  6. * Copyright IBM Corporation 2002, 2009
  7. */
  8. #define KMSG_COMPONENT "zfcp"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/blktrace_api.h>
  11. #include "zfcp_ext.h"
  12. #include "zfcp_dbf.h"
  13. static void zfcp_fsf_request_timeout_handler(unsigned long data)
  14. {
  15. struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
  16. zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
  17. "fsrth_1", NULL);
  18. }
  19. static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
  20. unsigned long timeout)
  21. {
  22. fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
  23. fsf_req->timer.data = (unsigned long) fsf_req->adapter;
  24. fsf_req->timer.expires = jiffies + timeout;
  25. add_timer(&fsf_req->timer);
  26. }
  27. static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
  28. {
  29. BUG_ON(!fsf_req->erp_action);
  30. fsf_req->timer.function = zfcp_erp_timeout_handler;
  31. fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
  32. fsf_req->timer.expires = jiffies + 30 * HZ;
  33. add_timer(&fsf_req->timer);
  34. }
/*
 * Association between FSF command code and the FSF QTCB type that must
 * be placed in the QTCB header when issuing that command.
 */
static u32 fsf_qtcb_type[] = {
	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
};
  51. static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
  52. {
  53. u16 subtable = table >> 16;
  54. u16 rule = table & 0xffff;
  55. const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
  56. if (subtable && subtable < ARRAY_SIZE(act_type))
  57. dev_warn(&adapter->ccw_device->dev,
  58. "Access denied according to ACT rule type %s, "
  59. "rule %d\n", act_type[subtable], rule);
  60. }
  61. static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
  62. struct zfcp_port *port)
  63. {
  64. struct fsf_qtcb_header *header = &req->qtcb->header;
  65. dev_warn(&req->adapter->ccw_device->dev,
  66. "Access denied to port 0x%016Lx\n",
  67. (unsigned long long)port->wwpn);
  68. zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
  69. zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
  70. zfcp_erp_port_access_denied(port, "fspad_1", req);
  71. req->status |= ZFCP_STATUS_FSFREQ_ERROR;
  72. }
  73. static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
  74. struct zfcp_unit *unit)
  75. {
  76. struct fsf_qtcb_header *header = &req->qtcb->header;
  77. dev_warn(&req->adapter->ccw_device->dev,
  78. "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
  79. (unsigned long long)unit->fcp_lun,
  80. (unsigned long long)unit->port->wwpn);
  81. zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
  82. zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
  83. zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
  84. req->status |= ZFCP_STATUS_FSFREQ_ERROR;
  85. }
  86. static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
  87. {
  88. dev_err(&req->adapter->ccw_device->dev, "FCP device not "
  89. "operational because of an unsupported FC class\n");
  90. zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
  91. req->status |= ZFCP_STATUS_FSFREQ_ERROR;
  92. }
  93. /**
  94. * zfcp_fsf_req_free - free memory used by fsf request
  95. * @fsf_req: pointer to struct zfcp_fsf_req
  96. */
  97. void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
  98. {
  99. if (likely(req->pool)) {
  100. if (likely(req->qtcb))
  101. mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
  102. mempool_free(req, req->pool);
  103. return;
  104. }
  105. if (likely(req->qtcb))
  106. kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
  107. kfree(req);
  108. }
  109. static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
  110. {
  111. unsigned long flags;
  112. struct fsf_status_read_buffer *sr_buf = req->data;
  113. struct zfcp_adapter *adapter = req->adapter;
  114. struct zfcp_port *port;
  115. int d_id = sr_buf->d_id & ZFCP_DID_MASK;
  116. read_lock_irqsave(&adapter->port_list_lock, flags);
  117. list_for_each_entry(port, &adapter->port_list, list)
  118. if (port->d_id == d_id) {
  119. zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
  120. break;
  121. }
  122. read_unlock_irqrestore(&adapter->port_list_lock, flags);
  123. }
/*
 * Evaluate a link-down condition: block rport activity, translate the
 * adapter-supplied error code into an operator message, and mark the
 * adapter failed for ERP.
 * @req: FSF request that reported the condition
 * @id: trace identifier recorded with the adapter-failed event
 * @link_down: optional detail record; NULL when no payload is available
 *
 * Idempotent while the link stays down: if LINK_UNPLUGGED is already
 * set, a repeated event is ignored so only the first one is logged.
 */
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
					 struct fsf_link_down_info *link_down)
{
	struct zfcp_adapter *adapter = req->adapter;

	/* a previous link-down event is still being handled */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
	zfcp_scsi_schedule_rports_block(adapter);

	/* no detail record: skip straight to failing the adapter */
	if (!link_down)
		goto out;

	/* map the adapter's error code to an operator-readable message */
	switch (link_down->error_code) {
	case FSF_PSQ_LINK_NO_LIGHT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is no light signal from the local "
			 "fibre channel cable\n");
		break;
	case FSF_PSQ_LINK_WRAP_PLUG:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is a wrap plug instead of a fibre "
			 "channel cable\n");
		break;
	case FSF_PSQ_LINK_NO_FCP:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent fibre channel node does not "
			 "support FCP\n");
		break;
	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device is suspended because of a "
			 "firmware update\n");
		break;
	case FSF_PSQ_LINK_INVALID_WWPN:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device detected a WWPN that is "
			 "duplicate or not valid\n");
		break;
	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The fibre channel fabric does not support NPIV\n");
		break;
	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter cannot support more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent switch cannot support "
			 "more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter could not log in to the "
			 "fibre channel fabric\n");
		break;
	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The WWPN assignment file on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The mode table on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "All NPIV ports on the FCP adapter have "
			 "been assigned\n");
		break;
	default:
		/* unrecognized code: report a generic link-down message */
		dev_warn(&req->adapter->ccw_device->dev,
			 "The link between the FCP adapter and "
			 "the FC fabric is down\n");
	}
out:
	zfcp_erp_adapter_failed(adapter, id, req);
}
  201. static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
  202. {
  203. struct fsf_status_read_buffer *sr_buf = req->data;
  204. struct fsf_link_down_info *ldi =
  205. (struct fsf_link_down_info *) &sr_buf->payload;
  206. switch (sr_buf->status_subtype) {
  207. case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
  208. zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
  209. break;
  210. case FSF_STATUS_READ_SUB_FDISC_FAILED:
  211. zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
  212. break;
  213. case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
  214. zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
  215. };
  216. }
/*
 * Handler for unsolicited status read requests.
 *
 * Logs the buffer, dispatches on the status type, then frees both the
 * status buffer and the request, and schedules stat_work so the pool
 * of outstanding status reads is replenished.
 */
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_status_read_buffer *sr_buf = req->data;

	/* dismissed (queues shut down): just log and free, no processing */
	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
		mempool_free(sr_buf, adapter->pool.status_read_data);
		zfcp_fsf_req_free(req);
		return;
	}

	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);

	switch (sr_buf->status_type) {
	case FSF_STATUS_READ_PORT_CLOSED:
		zfcp_fsf_status_read_port_closed(req);
		break;
	case FSF_STATUS_READ_INCOMING_ELS:
		zfcp_fc_incoming_els(req);
		break;
	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
		/* nothing to do, buffer is freed below */
		break;
	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
		dev_warn(&adapter->ccw_device->dev,
			 "The error threshold for checksum statistics "
			 "has been exceeded\n");
		zfcp_dbf_hba_berr(adapter->dbf, req);
		break;
	case FSF_STATUS_READ_LINK_DOWN:
		zfcp_fsf_status_read_link_down(req);
		break;
	case FSF_STATUS_READ_LINK_UP:
		dev_info(&adapter->ccw_device->dev,
			 "The local link has been restored\n");
		/* All ports should be marked as ready to run again */
		zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
					       ZFCP_STATUS_COMMON_RUNNING,
					       ZFCP_SET);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fssrh_2", req);
		break;
	case FSF_STATUS_READ_NOTIFICATION_LOST:
		/* subtype is a bitmask: both conditions may apply at once */
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
			zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
							req);
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
			queue_work(adapter->work_queue, &adapter->scan_work);
		break;
	case FSF_STATUS_READ_CFDC_UPDATED:
		zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
		break;
	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
		adapter->adapter_features = sr_buf->payload.word[0];
		break;
	}

	mempool_free(sr_buf, adapter->pool.status_read_data);
	zfcp_fsf_req_free(req);

	/* account for the consumed status read and schedule a refill */
	atomic_inc(&adapter->stat_miss);
	queue_work(adapter->work_queue, &adapter->stat_work);
}
/*
 * Evaluate the FSF status qualifier of a completed request.
 *
 * The first group of qualifiers returns without touching req->status;
 * every other qualifier falls out of the switch and gets FSFREQ_ERROR
 * set below — the order of return vs. break is significant here.
 */
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
	switch (req->qtcb->header.fsf_status_qual.word[0]) {
	case FSF_SQ_FCP_RSP_AVAILABLE:
	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
	case FSF_SQ_NO_RETRY_POSSIBLE:
	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		/* not an error from this function's point of view */
		return;
	case FSF_SQ_COMMAND_ABORTED:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
		break;
	case FSF_SQ_NO_RECOM:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter reported a problem "
			"that cannot be recovered\n");
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
		break;
	}
	/* all non-return stats set FSFREQ_ERROR*/
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
  298. static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
  299. {
  300. if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
  301. return;
  302. switch (req->qtcb->header.fsf_status) {
  303. case FSF_UNKNOWN_COMMAND:
  304. dev_err(&req->adapter->ccw_device->dev,
  305. "The FCP adapter does not recognize the command 0x%x\n",
  306. req->qtcb->header.fsf_command);
  307. zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
  308. req->status |= ZFCP_STATUS_FSFREQ_ERROR;
  309. break;
  310. case FSF_ADAPTER_STATUS_AVAILABLE:
  311. zfcp_fsf_fsfstatus_qual_eval(req);
  312. break;
  313. }
  314. }
/*
 * Evaluate the QTCB protocol status of a completed request.
 *
 * FSF_PROT_GOOD / FSF_PROT_FSF_STATUS_PRESENTED return early and leave
 * req->status untouched; every other protocol status falls out of the
 * switch and gets ZFCP_STATUS_FSFREQ_ERROR set at the bottom.
 */
static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;

	zfcp_dbf_hba_fsf_response(req);

	/* dismissed request: mark failed and retryable, nothing to evaluate */
	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
		return;
	}

	switch (qtcb->prefix.prot_status) {
	case FSF_PROT_GOOD:
	case FSF_PROT_FSF_STATUS_PRESENTED:
		return;
	case FSF_PROT_QTCB_VERSION_ERROR:
		dev_err(&adapter->ccw_device->dev,
			"QTCB version 0x%x not supported by FCP adapter "
			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
			psq->word[0], psq->word[1]);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
		break;
	case FSF_PROT_ERROR_STATE:
	case FSF_PROT_SEQ_NUMB_ERROR:
		/* recoverable: reopen the adapter and let the caller retry */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
		req->status |= ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_PROT_UNSUPP_QTCB_TYPE:
		dev_err(&adapter->ccw_device->dev,
			"The QTCB type is not supported by the FCP adapter\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				&adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:
		dev_err(&adapter->ccw_device->dev,
			"0x%Lx is an ambiguous request identifier\n",
			(unsigned long long)qtcb->bottom.support.req_handle);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
		break;
	case FSF_PROT_LINK_DOWN:
		zfcp_fsf_link_down_info_eval(req, "fspse_5",
					     &psq->link_down_info);
		/* FIXME: reopening adapter now? better wait for link up */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
		break;
	case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
		zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
					       ZFCP_STATUS_COMMON_RUNNING,
					       ZFCP_SET);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fspse_8", req);
		break;
	default:
		dev_err(&adapter->ccw_device->dev,
			"0x%x is not a valid transfer protocol status\n",
			qtcb->prefix.prot_status);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
	}
	/* reached for every case that did not return above */
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter,
 * or it has been dismissed due to a queue shutdown, this function
 * is called to process the completion status and trigger further
 * events related to the FSF request.
 */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
	/* unsolicited status reads bypass the generic evaluation */
	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
		zfcp_fsf_status_read_handler(req);
		return;
	}

	del_timer(&req->timer);
	zfcp_fsf_protstatus_eval(req);
	zfcp_fsf_fsfstatus_eval(req);
	req->handler(req);

	/* wake up ERP if it is waiting for this request */
	if (req->erp_action)
		zfcp_erp_notify(req->erp_action, 0);

	/* free immediately unless a waiter still needs the request */
	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
		zfcp_fsf_req_free(req);
	else
		complete(&req->completion);
}
/**
 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never ever call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Included BUG_ON() call to ensure this is done.
 * ERP is supposed to be the only user of this function.
 */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_queue);
	unsigned int i;

	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);

	/* detach all pending requests from the hash under the lock ... */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	for (i = 0; i < REQUEST_LIST_SIZE; i++)
		list_splice_init(&adapter->req_list[i], &remove_queue);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	/* ... then complete them as dismissed, outside the lock */
	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
		list_del(&req->list);
		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(req);
	}
}
/*
 * Copy exchange-config results into the adapter and the fc_host
 * attributes of the SCSI host.
 *
 * Returns 0 on success, -EIO (after shutting the adapter down) for an
 * unknown or unsupported topology.  If req->data is set, the raw
 * bottom area is also copied there for the caller.
 */
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb_bottom_config *bottom;
	struct zfcp_adapter *adapter = req->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;

	bottom = &req->qtcb->bottom.config;
	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
	fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
	fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
	fc_host_speed(shost) = bottom->fc_link_speed;
	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

	adapter->hydra_version = bottom->adapter_type;
	adapter->timer_ticks = bottom->timer_interval;

	/* -1 marks "not yet set"; default to the current port name */
	if (fc_host_permanent_port_name(shost) == -1)
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);

	switch (bottom->fc_topology) {
	case FSF_TOPO_P2P:
		adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
		adapter->peer_wwpn = bottom->plogi_payload.wwpn;
		adapter->peer_wwnn = bottom->plogi_payload.wwnn;
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		break;
	case FSF_TOPO_FABRIC:
		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case FSF_TOPO_AL:
		/* arbitrated loop is not supported: record the port type,
		 * then deliberately fall through to the error path */
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		/* fall through */
	default:
		dev_err(&adapter->ccw_device->dev,
			"Unknown or unsupported arbitrated loop "
			"fibre channel topology detected\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
		return -EIO;
	}

	return 0;
}
/*
 * Completion handler for the exchange-config-data command.
 *
 * Stores the negotiated features, evaluates the config (or clears the
 * fc_host attributes on an incomplete exchange), and performs sanity
 * checks on QTCB size and supported control-block version range —
 * any violation shuts the adapter down.
 */
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	adapter->fsf_lic_version = bottom->lic_version;
	adapter->adapter_features = bottom->adapter_features;
	adapter->connection_features = bottom->connection_features;
	/* reset peer info; re-filled for P2P topology in the evaluate step */
	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;

		/* the adapter must accept QTCBs at least as large as ours */
		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
			dev_err(&adapter->ccw_device->dev,
				"FCP adapter maximum QTCB size (%d bytes) "
				"is too small\n",
				bottom->max_qtcb_size);
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
			return;
		}
		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		/* exchange incomplete (link down): clear host attributes */
		fc_host_node_name(shost) = 0;
		fc_host_port_name(shost) = 0;
		fc_host_port_id(shost) = 0;
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
		adapter->hydra_version = 0;

		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);

		zfcp_fsf_link_down_info_eval(req, "fsecdh2",
			&qtcb->header.fsf_status_qual.link_down_info);
		break;
	default:
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
		return;
	}

	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
		adapter->hardware_version = bottom->hardware_version;
		/* serial number is EBCDIC on the adapter; convert to ASCII */
		memcpy(fc_host_serial_number(shost), bottom->serial_number,
		       min(FC_SERIAL_NUMBER_SIZE, 17));
		EBCASC(fc_host_serial_number(shost),
		       min(FC_SERIAL_NUMBER_SIZE, 17));
	}

	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports newer "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
		return;
	}
	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports older "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
	}
}
  538. static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
  539. {
  540. struct zfcp_adapter *adapter = req->adapter;
  541. struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
  542. struct Scsi_Host *shost = adapter->scsi_host;
  543. if (req->data)
  544. memcpy(req->data, bottom, sizeof(*bottom));
  545. if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
  546. fc_host_permanent_port_name(shost) = bottom->wwpn;
  547. fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
  548. } else
  549. fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
  550. fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
  551. fc_host_supported_speeds(shost) = bottom->supported_speed;
  552. }
/*
 * Completion handler for exchange-port-data requests: evaluate the
 * response data; on INCOMPLETE additionally run link-down evaluation.
 */
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb *qtcb = req->qtcb;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		zfcp_fsf_exchange_port_evaluate(req);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		/* data is usable, but the link is down */
		zfcp_fsf_exchange_port_evaluate(req);
		zfcp_fsf_link_down_info_eval(req, "fsepdh1",
			&qtcb->header.fsf_status_qual.link_down_info);
		break;
	}
}
/*
 * Check whether a free SBAL is available on the request queue.
 *
 * NOTE(locking): this helper has an asymmetric lock contract by design —
 * it returns 1 with qdio->req_q_lock HELD (caller proceeds under the lock),
 * and returns 0 with the lock RELEASED (used as a wait_event condition in
 * zfcp_fsf_req_sbal_get).
 */
static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;

	spin_lock_bh(&qdio->req_q_lock);
	if (atomic_read(&req_q->count))
		return 1;	/* lock intentionally kept held */
	spin_unlock_bh(&qdio->req_q_lock);
	return 0;
}
/*
 * Wait (up to 5 seconds) for a free SBAL on the request queue.
 *
 * Called with qdio->req_q_lock held; the lock is dropped while sleeping
 * and is held again on return, regardless of the outcome.
 *
 * Returns 0 if an SBAL is available (zfcp_fsf_sbal_check reacquired the
 * lock), -EIO on timeout or interruption. A timeout is treated as a hung
 * outbound queue and triggers adapter recovery.
 */
static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	long ret;

	spin_unlock_bh(&qdio->req_q_lock);
	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
			       zfcp_fsf_sbal_check(qdio), 5 * HZ);
	if (ret > 0)
		return 0;	/* condition true: lock already reacquired */
	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
	}

	spin_lock_bh(&qdio->req_q_lock);
	return -EIO;
}
  595. static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
  596. {
  597. struct zfcp_fsf_req *req;
  598. if (likely(pool))
  599. req = mempool_alloc(pool, GFP_ATOMIC);
  600. else
  601. req = kmalloc(sizeof(*req), GFP_ATOMIC);
  602. if (unlikely(!req))
  603. return NULL;
  604. memset(req, 0, sizeof(*req));
  605. req->pool = pool;
  606. return req;
  607. }
  608. static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
  609. {
  610. struct fsf_qtcb *qtcb;
  611. if (likely(pool))
  612. qtcb = mempool_alloc(pool, GFP_ATOMIC);
  613. else
  614. qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
  615. if (unlikely(!qtcb))
  616. return NULL;
  617. memset(qtcb, 0, sizeof(*qtcb));
  618. return qtcb;
  619. }
  620. static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
  621. u32 fsf_cmd, mempool_t *pool)
  622. {
  623. struct qdio_buffer_element *sbale;
  624. struct zfcp_qdio_queue *req_q = &qdio->req_q;
  625. struct zfcp_adapter *adapter = qdio->adapter;
  626. struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
  627. if (unlikely(!req))
  628. return ERR_PTR(-ENOMEM);
  629. if (adapter->req_no == 0)
  630. adapter->req_no++;
  631. INIT_LIST_HEAD(&req->list);
  632. init_timer(&req->timer);
  633. init_completion(&req->completion);
  634. req->adapter = adapter;
  635. req->fsf_command = fsf_cmd;
  636. req->req_id = adapter->req_no;
  637. req->queue_req.sbal_number = 1;
  638. req->queue_req.sbal_first = req_q->first;
  639. req->queue_req.sbal_last = req_q->first;
  640. req->queue_req.sbale_curr = 1;
  641. sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
  642. sbale[0].addr = (void *) req->req_id;
  643. sbale[0].flags |= SBAL_FLAGS0_COMMAND;
  644. if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
  645. if (likely(pool))
  646. req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
  647. else
  648. req->qtcb = zfcp_qtcb_alloc(NULL);
  649. if (unlikely(!req->qtcb)) {
  650. zfcp_fsf_req_free(req);
  651. return ERR_PTR(-ENOMEM);
  652. }
  653. req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
  654. req->qtcb->prefix.req_id = req->req_id;
  655. req->qtcb->prefix.ulp_info = 26;
  656. req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
  657. req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
  658. req->qtcb->header.req_handle = req->req_id;
  659. req->qtcb->header.fsf_command = req->fsf_command;
  660. req->seq_no = adapter->fsf_req_seq_no;
  661. req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
  662. sbale[1].addr = (void *) req->qtcb;
  663. sbale[1].length = sizeof(struct fsf_qtcb);
  664. }
  665. if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
  666. zfcp_fsf_req_free(req);
  667. return ERR_PTR(-EIO);
  668. }
  669. return req;
  670. }
/*
 * Hand a fully prepared FSF request to QDIO for transmission.
 *
 * The request is entered into the adapter's request hash before sending
 * so the interrupt path can find it. If the QDIO send fails, the request
 * is removed again (if still present), its timer is stopped, and adapter
 * recovery is triggered; returns -EIO in that case, 0 on success.
 */
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	unsigned long flags;
	int idx;
	int with_qtcb = (req->qtcb != NULL);

	/* put allocated FSF request into hash table */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	idx = zfcp_reqlist_hash(req->req_id);
	list_add_tail(&req->list, &adapter->req_list[idx]);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
	req->issued = get_clock();
	if (zfcp_qdio_send(qdio, &req->queue_req)) {
		del_timer(&req->timer);
		spin_lock_irqsave(&adapter->req_list_lock, flags);
		/* lookup request again, list might have changed */
		if (zfcp_reqlist_find_safe(adapter, req))
			zfcp_reqlist_remove(adapter, req);
		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
		return -EIO;
	}

	/* Don't increase for unsolicited status */
	if (with_qtcb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}
/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct qdio_buffer_element *sbale;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* unsolicited status has no QTCB; data buffer goes in SBALE 2 */
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
	req->queue_req.sbale_curr = 2;

	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
	if (!sr_buf) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;
	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
	sbale->addr = (void *) sr_buf;
	sbale->length = sizeof(*sr_buf);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;
	goto out;

failed_req_send:
	mempool_free(sr_buf, adapter->pool.status_read_data);
failed_buf:
	zfcp_fsf_req_free(req);
	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/*
 * Completion handler for abort-FCP-command requests: translate the FSF
 * status into request status flags and trigger recovery for invalid
 * handles or boxed port/LUN conditions.
 */
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_unit *unit = req->data;
	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_adapter_reopen(unit->port->adapter, 0,
						"fsafch1", req);
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
		/* command already completed; abort is unnecessary */
		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fsafch3", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_unit_boxed(unit, "fsafch4", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (fsq->word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(unit->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
		break;
	}
}
/**
 * zfcp_fsf_abort_fcp_command - abort running SCSI command
 * @old_req_id: request id of the command that shall be aborted
 * @unit: pointer to struct zfcp_unit the command was issued for
 * Returns: pointer to struct zfcp_fsf_req, or NULL on any failure
 */
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
						struct zfcp_unit *unit)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	struct zfcp_qdio *qdio = unit->port->adapter->qdio;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  qdio->adapter->pool.scsi_abort);
	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	/* don't send the abort while the unit is blocked by recovery */
	if (unlikely(!(atomic_read(&unit->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		goto out_error_free;

	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->data = unit;
	req->handler = zfcp_fsf_abort_fcp_command_handler;
	req->qtcb->header.lun_handle = unit->handle;
	req->qtcb->header.port_handle = unit->port->handle;
	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

out_error_free:
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return req;
}
/*
 * Completion handler for CT/GS (Generic Service) requests: set
 * send_ct->status (0 on success, -EINVAL otherwise) and invoke the
 * caller-supplied completion callback, if any.
 */
static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_send_ct *send_ct = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	send_ct->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		zfcp_dbf_san_ct_response(req);
		send_ct->status = 0;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]){
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ACCESS_DENIED:
		break;
	case FSF_PORT_BOXED:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
		/* fall through */
	case FSF_GENERIC_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
	case FSF_SBAL_MISMATCH:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

skip_fsfstatus:
	if (send_ct->handler)
		send_ct->handler(send_ct->handler_data);
}
/*
 * Set up a single, unchained SBAL for a CT/ELS request: SBALE 2 holds the
 * request buffer, SBALE 3 the response buffer (SBALEs 0/1 are occupied by
 * the request id and the QTCB). Assumes each scatterlist is a single
 * entry (see zfcp_fsf_one_sbal).
 */
static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
					    struct scatterlist *sg_req,
					    struct scatterlist *sg_resp)
{
	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
	sbale[2].addr = sg_virt(sg_req);
	sbale[2].length = sg_req->length;
	sbale[3].addr = sg_virt(sg_resp);
	sbale[3].length = sg_resp->length;
	sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
}
  895. static int zfcp_fsf_one_sbal(struct scatterlist *sg)
  896. {
  897. return sg_is_last(sg) && sg->length <= PAGE_SIZE;
  898. }
  899. static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
  900. struct scatterlist *sg_req,
  901. struct scatterlist *sg_resp,
  902. int max_sbals)
  903. {
  904. struct zfcp_adapter *adapter = req->adapter;
  905. struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
  906. &req->queue_req);
  907. u32 feat = adapter->adapter_features;
  908. int bytes;
  909. if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
  910. if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
  911. return -EOPNOTSUPP;
  912. zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
  913. return 0;
  914. }
  915. /* use single, unchained SBAL if it can hold the request */
  916. if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
  917. zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
  918. return 0;
  919. }
  920. bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
  921. SBAL_FLAGS0_TYPE_WRITE_READ,
  922. sg_req, max_sbals);
  923. if (bytes <= 0)
  924. return -EIO;
  925. req->qtcb->bottom.support.req_buf_length = bytes;
  926. req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
  927. bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
  928. SBAL_FLAGS0_TYPE_WRITE_READ,
  929. sg_resp, max_sbals);
  930. req->qtcb->bottom.support.resp_buf_length = bytes;
  931. if (bytes <= 0)
  932. return -EIO;
  933. return 0;
  934. }
/*
 * Common setup for CT/GS and ELS requests: map the buffers onto SBALs
 * and apply the shared service-class, FSF timeout and driver timer
 * settings. Returns 0 on success or the error from the SBAL setup.
 */
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
				 struct scatterlist *sg_req,
				 struct scatterlist *sg_resp,
				 int max_sbals)
{
	int ret;

	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
	if (ret)
		return ret;

	/* common settings for ct/gs and els requests */
	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
	req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
	/* driver timer slightly longer than the FSF timeout */
	zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ);

	return 0;
}
/**
 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
 * @ct: pointer to struct zfcp_send_ct with data for request
 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
{
	struct zfcp_wka_port *wka_port = ct->wka_port;
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
				    FSF_MAX_SBALS_PER_REQ);
	if (ret)
		goto failed_send;

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
	req->data = ct;

	zfcp_dbf_san_ct_request(req);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return ret;
}
/*
 * Completion handler for ELS requests: set send_els->status (0 on
 * success, -EINVAL otherwise) and invoke the caller-supplied completion
 * callback, if any.
 */
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_send_els *send_els = req->data;
	struct zfcp_port *port = send_els->port;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	send_els->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		zfcp_dbf_san_els_response(req);
		send_els->status = 0;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]){
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* don't link-test for ADISC: it is itself the test */
			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
				zfcp_fc_test_link(port);
			/*fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_RETRY_IF_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ELS_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
		break;
	case FSF_ACCESS_DENIED:
		if (port)
			zfcp_fsf_access_denied_port(req, port);
		break;
	case FSF_SBAL_MISMATCH:
		/* should never occure, avoided in zfcp_fsf_send_els */
		/* fall through */
	default:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

skip_fsfstatus:
	if (send_els->handler)
		send_els->handler(send_els->handler_data);
}
/**
 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @els: pointer to struct zfcp_send_els with data for the command
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_send_els(struct zfcp_send_els *els)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = els->adapter->qdio;
	int ret = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	/* ELS payloads are limited to 2 SBALs (avoids FSF_SBAL_MISMATCH) */
	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
	if (ret)
		goto failed_send;

	req->qtcb->bottom.support.d_id = els->d_id;
	req->handler = zfcp_fsf_send_els_handler;
	req->data = els;

	zfcp_dbf_san_els_request(req);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return ret;
}
/**
 * zfcp_fsf_exchange_config_data - request adapter configuration data
 * @erp_action: ERP action initiating the exchange
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  qdio->adapter->pool.erp_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	/* advertise the optional features this driver can handle */
	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_CFDC |
			FSF_FEATURE_LUN_SHARING |
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_exchange_config_data_handler;
	erp_action->fsf_req = req;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req = NULL;
	}
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/**
 * zfcp_fsf_exchange_config_data_sync - request adapter config, synchronously
 * @qdio: pointer to struct zfcp_qdio
 * @data: optional buffer receiving the config bottom (may be NULL)
 * Returns: 0 on success, error otherwise
 *
 * Sleeps until the request completes; must not be called from atomic
 * context.
 */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
	req->handler = zfcp_fsf_exchange_config_data_handler;

	/* advertise the optional features this driver can handle */
	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_CFDC |
			FSF_FEATURE_LUN_SHARING |
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;

	if (data)
		req->data = data;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_bh(&qdio->req_q_lock);
	if (!retval)
		wait_for_completion(&req->completion);

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/**
 * zfcp_fsf_exchange_port_data - request information about local port
 * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	/* command only exists with the HBA-API management feature */
	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  qdio->adapter->pool.erp_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->handler = zfcp_fsf_exchange_port_data_handler;
	req->erp_action = erp_action;
	erp_action->fsf_req = req;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req = NULL;
	}
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/**
 * zfcp_fsf_exchange_port_data_sync - request information about local port
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_port (may be NULL)
 * Returns: 0 on success, error otherwise
 *
 * Sleeps until the request completes; must not be called from atomic
 * context.
 */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	/* command only exists with the HBA-API management feature */
	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	if (data)
		req->data = data;

	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_bh(&qdio->req_q_lock);
	if (!retval)
		wait_for_completion(&req->completion);

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/*
 * Completion handler for open-port requests: on success store the port
 * handle, update port status bits, and evaluate the PLOGI payload that
 * the adapter returns with the response. Drops the port reference taken
 * by zfcp_fsf_open_port in all cases.
 */
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fsf_plogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_port(req, port);
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_port_failed(port, "fsoph_1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		                  ZFCP_STATUS_COMMON_ACCESS_BOXED,
		                  &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, they might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourself at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
		if (req->qtcb->bottom.support.els1_length >=
		    FSF_PLOGI_MIN_LEN) {
			if (plogi->serv_param.wwpn != port->wwpn) {
				/* stale cached d_id: the WWPN moved */
				port->d_id = 0;
				dev_warn(&port->adapter->ccw_device->dev,
					 "A port opened with WWPN 0x%016Lx "
					 "returned data that identifies it as "
					 "WWPN 0x%016Lx\n",
					 (unsigned long long) port->wwpn,
					 (unsigned long long)
					 plogi->serv_param.wwpn);
			} else {
				port->wwnn = plogi->serv_param.wwnn;
				zfcp_fc_plogi_evaluate(port, plogi);
			}
		}
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	put_device(&port->sysfs_device);
}
/**
 * zfcp_fsf_open_port - create and send open port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_port *port = erp_action->port;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  qdio->adapter->pool.erp_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->handler = zfcp_fsf_open_port_handler;
	req->qtcb->bottom.support.d_id = port->d_id;
	req->data = port;
	req->erp_action = erp_action;
	erp_action->fsf_req = req;
	/* hold a port reference until the handler runs (put there) */
	get_device(&port->sysfs_device);

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req = NULL;
		put_device(&port->sysfs_device);
	}
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/*
 * Completion handler for close-port requests: clear the port's OPEN
 * status on success; an invalid port handle triggers adapter recovery.
 */
static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		break;
	case FSF_GOOD:
		zfcp_erp_modify_port_status(port, "fscph_2", req,
					    ZFCP_STATUS_COMMON_OPEN,
					    ZFCP_CLEAR);
		break;
	}
}
/**
 * zfcp_fsf_close_port - create and send close port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  qdio->adapter->pool.erp_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->handler = zfcp_fsf_close_port_handler;
	req->data = erp_action->port;
	req->erp_action = erp_action;
	req->qtcb->header.port_handle = erp_action->port->handle;
	erp_action->fsf_req = req;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req = NULL;
	}
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
/*
 * Completion handler for open requests on well-known-address (WKA)
 * ports: set the WKA port ONLINE/OFFLINE state accordingly and wake
 * waiters. Note the deliberate case fallthroughs below.
 */
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
		goto out;
	}

	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		/* fall through: flag the request as failed ... */
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		/* fall through: ... and take the WKA port offline */
	case FSF_ACCESS_DENIED:
		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		wka_port->handle = header->port_handle;
		/* fall through: freshly opened is online too */
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_WKA_PORT_ONLINE;
	}
out:
	wake_up(&wka_port->completion_wq);
}
/**
 * zfcp_fsf_open_wka_port - create and send open wka-port request
 * @wka_port: pointer to struct zfcp_wka_port
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  qdio->adapter->pool.erp_req);
	if (unlikely(IS_ERR(req))) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->handler = zfcp_fsf_open_wka_port_handler;
	req->qtcb->bottom.support.d_id = wka_port->d_id;
	req->data = wka_port;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
  1462. static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
  1463. {
  1464. struct zfcp_wka_port *wka_port = req->data;
  1465. if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
  1466. req->status |= ZFCP_STATUS_FSFREQ_ERROR;
  1467. zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
  1468. }
  1469. wka_port->status = ZFCP_WKA_PORT_OFFLINE;
  1470. wake_up(&wka_port->completion_wq);
  1471. }
  1472. /**
  1473. * zfcp_fsf_close_wka_port - create and send close wka port request
  1474. * @erp_action: pointer to struct zfcp_erp_action
  1475. * Returns: 0 on success, error otherwise
  1476. */
  1477. int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
  1478. {
  1479. struct qdio_buffer_element *sbale;
  1480. struct zfcp_qdio *qdio = wka_port->adapter->qdio;
  1481. struct zfcp_fsf_req *req;
  1482. int retval = -EIO;
  1483. spin_lock_bh(&qdio->req_q_lock);
  1484. if (zfcp_fsf_req_sbal_get(qdio))
  1485. goto out;
  1486. req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
  1487. qdio->adapter->pool.erp_req);
  1488. if (unlikely(IS_ERR(req))) {
  1489. retval = PTR_ERR(req);
  1490. goto out;
  1491. }
  1492. req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
  1493. sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
  1494. sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
  1495. sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
  1496. req->handler = zfcp_fsf_close_wka_port_handler;
  1497. req->data = wka_port;
  1498. req->qtcb->header.port_handle = wka_port->handle;
  1499. zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
  1500. retval = zfcp_fsf_req_send(req);
  1501. if (retval)
  1502. zfcp_fsf_req_free(req);
  1503. out:
  1504. spin_unlock_bh(&qdio->req_q_lock);
  1505. return retval;
  1506. }
/* Completion handler for a close physical port request: on success (and
 * on PORT_BOXED) the port loses its "physically open" state and all of
 * its units lose their OPEN state. */
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct zfcp_unit *unit;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		/* adapter no longer knows our handle: full adapter reopen */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_port(req, port);
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		read_lock(&port->unit_list_lock);
		list_for_each_entry(unit, &port->unit_list, list)
			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
					  &unit->status);
		read_unlock(&port->unit_list_lock);
		zfcp_erp_port_boxed(port, "fscpph2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		read_lock(&port->unit_list_lock);
		list_for_each_entry(unit, &port->unit_list, list)
			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
					  &unit->status);
		read_unlock(&port->unit_list_lock);
		break;
	}
}
  1557. /**
  1558. * zfcp_fsf_close_physical_port - close physical port
  1559. * @erp_action: pointer to struct zfcp_erp_action
  1560. * Returns: 0 on success
  1561. */
  1562. int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
  1563. {
  1564. struct qdio_buffer_element *sbale;
  1565. struct zfcp_qdio *qdio = erp_action->adapter->qdio;
  1566. struct zfcp_fsf_req *req;
  1567. int retval = -EIO;
  1568. spin_lock_bh(&qdio->req_q_lock);
  1569. if (zfcp_fsf_req_sbal_get(qdio))
  1570. goto out;
  1571. req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
  1572. qdio->adapter->pool.erp_req);
  1573. if (IS_ERR(req)) {
  1574. retval = PTR_ERR(req);
  1575. goto out;
  1576. }
  1577. req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
  1578. sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
  1579. sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
  1580. sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
  1581. req->data = erp_action->port;
  1582. req->qtcb->header.port_handle = erp_action->port->handle;
  1583. req->erp_action = erp_action;
  1584. req->handler = zfcp_fsf_close_physical_port_handler;
  1585. erp_action->fsf_req = req;
  1586. zfcp_fsf_start_erp_timer(req);
  1587. retval = zfcp_fsf_req_send(req);
  1588. if (retval) {
  1589. zfcp_fsf_req_free(req);
  1590. erp_action->fsf_req = NULL;
  1591. }
  1592. out:
  1593. spin_unlock_bh(&qdio->req_q_lock);
  1594. return retval;
  1595. }
/* Completion handler for an open LUN (unit) request: evaluate the FSF
 * status, maintain the unit status bits and, on success outside NPIV
 * mode, evaluate the reported exclusive/read-write access permissions. */
static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_unit *unit = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
	struct fsf_queue_designator *queue_designator =
		&header->fsf_status_qual.fsf_queue_designator;
	int exclusive, readwrite;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	/* start from a clean access/sharing state; bits are set again below */
	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
			  ZFCP_STATUS_UNIT_SHARED |
			  ZFCP_STATUS_UNIT_READONLY,
			  &unit->status);

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
		/* fall through */
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_unit(req, unit);
		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		/* status qualifier word 0 carries the queue designator of
		 * the owning image when set; otherwise evaluate word 2 */
		if (header->fsf_status_qual.word[0])
			dev_warn(&adapter->ccw_device->dev,
				 "LUN 0x%Lx on port 0x%Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 (unsigned long long)unit->fcp_lun,
				 (unsigned long long)unit->port->wwpn,
				 queue_designator->cssid,
				 queue_designator->hla);
		else
			zfcp_act_eval_err(adapter,
					  header->fsf_status_qual.word[2]);
		zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)unit->fcp_lun,
			 (unsigned long long)unit->port->wwpn);
		zfcp_erp_unit_failed(unit, "fsouh_4", req);
		/* fall through */
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(unit->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		unit->handle = header->lun_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);

		/* outside NPIV mode, when the adapter supports LUN sharing
		 * and the subchannel is not privileged, evaluate the access
		 * permissions reported by the adapter */
		if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
		    (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
		    !zfcp_ccw_priv_sch(adapter)) {
			exclusive = (bottom->lun_access_info &
				     FSF_UNIT_ACCESS_EXCLUSIVE);
			readwrite = (bottom->lun_access_info &
				     FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);

			if (!exclusive)
				atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
						&unit->status);

			if (!readwrite) {
				atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
						&unit->status);
				dev_info(&adapter->ccw_device->dev,
					 "SCSI device at LUN 0x%016Lx on port "
					 "0x%016Lx opened read-only\n",
					 (unsigned long long)unit->fcp_lun,
					 (unsigned long long)unit->port->wwpn);
			}

			/* the two remaining combinations are not supported:
			 * fail the unit and shut it down */
			if (exclusive && !readwrite) {
				dev_err(&adapter->ccw_device->dev,
					"Exclusive read-only access not "
					"supported (unit 0x%016Lx, "
					"port 0x%016Lx)\n",
					(unsigned long long)unit->fcp_lun,
					(unsigned long long)unit->port->wwpn);
				zfcp_erp_unit_failed(unit, "fsouh_5", req);
				req->status |= ZFCP_STATUS_FSFREQ_ERROR;
				zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
			} else if (!exclusive && readwrite) {
				dev_err(&adapter->ccw_device->dev,
					"Shared read-write access not "
					"supported (unit 0x%016Lx, port "
					"0x%016Lx)\n",
					(unsigned long long)unit->fcp_lun,
					(unsigned long long)unit->port->wwpn);
				zfcp_erp_unit_failed(unit, "fsouh_7", req);
				req->status |= ZFCP_STATUS_FSFREQ_ERROR;
				zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
			}
		}
		break;
	}
}
  1713. /**
  1714. * zfcp_fsf_open_unit - open unit
  1715. * @erp_action: pointer to struct zfcp_erp_action
  1716. * Returns: 0 on success, error otherwise
  1717. */
  1718. int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
  1719. {
  1720. struct qdio_buffer_element *sbale;
  1721. struct zfcp_adapter *adapter = erp_action->adapter;
  1722. struct zfcp_qdio *qdio = adapter->qdio;
  1723. struct zfcp_fsf_req *req;
  1724. int retval = -EIO;
  1725. spin_lock_bh(&qdio->req_q_lock);
  1726. if (zfcp_fsf_req_sbal_get(qdio))
  1727. goto out;
  1728. req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
  1729. adapter->pool.erp_req);
  1730. if (IS_ERR(req)) {
  1731. retval = PTR_ERR(req);
  1732. goto out;
  1733. }
  1734. req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
  1735. sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
  1736. sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
  1737. sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
  1738. req->qtcb->header.port_handle = erp_action->port->handle;
  1739. req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
  1740. req->handler = zfcp_fsf_open_unit_handler;
  1741. req->data = erp_action->unit;
  1742. req->erp_action = erp_action;
  1743. erp_action->fsf_req = req;
  1744. if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
  1745. req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
  1746. zfcp_fsf_start_erp_timer(req);
  1747. retval = zfcp_fsf_req_send(req);
  1748. if (retval) {
  1749. zfcp_fsf_req_free(req);
  1750. erp_action->fsf_req = NULL;
  1751. }
  1752. out:
  1753. spin_unlock_bh(&qdio->req_q_lock);
  1754. return retval;
  1755. }
/* Completion handler for a close LUN (unit) request: clear the unit's
 * OPEN state on success, trigger recovery on stale handles. */
static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_unit *unit = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		/* adapter no longer knows our port handle: adapter reopen */
		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		/* stale LUN handle: reopening the port is sufficient */
		zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(unit->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
		break;
	}
}
  1790. /**
  1791. * zfcp_fsf_close_unit - close zfcp unit
  1792. * @erp_action: pointer to struct zfcp_unit
  1793. * Returns: 0 on success, error otherwise
  1794. */
  1795. int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
  1796. {
  1797. struct qdio_buffer_element *sbale;
  1798. struct zfcp_qdio *qdio = erp_action->adapter->qdio;
  1799. struct zfcp_fsf_req *req;
  1800. int retval = -EIO;
  1801. spin_lock_bh(&qdio->req_q_lock);
  1802. if (zfcp_fsf_req_sbal_get(qdio))
  1803. goto out;
  1804. req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
  1805. qdio->adapter->pool.erp_req);
  1806. if (IS_ERR(req)) {
  1807. retval = PTR_ERR(req);
  1808. goto out;
  1809. }
  1810. req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
  1811. sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
  1812. sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
  1813. sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
  1814. req->qtcb->header.port_handle = erp_action->port->handle;
  1815. req->qtcb->header.lun_handle = erp_action->unit->handle;
  1816. req->handler = zfcp_fsf_close_unit_handler;
  1817. req->data = erp_action->unit;
  1818. req->erp_action = erp_action;
  1819. erp_action->fsf_req = req;
  1820. zfcp_fsf_start_erp_timer(req);
  1821. retval = zfcp_fsf_req_send(req);
  1822. if (retval) {
  1823. zfcp_fsf_req_free(req);
  1824. erp_action->fsf_req = NULL;
  1825. }
  1826. out:
  1827. spin_unlock_bh(&qdio->req_q_lock);
  1828. return retval;
  1829. }
  1830. static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
  1831. {
  1832. lat_rec->sum += lat;
  1833. lat_rec->min = min(lat_rec->min, lat);
  1834. lat_rec->max = max(lat_rec->max, lat);
  1835. }
/* Collect blktrace driver data for a completed SCSI command (queue
 * utilization and, when the adapter provides measurement data, channel
 * and fabric latencies) and fold the latencies into the per-unit
 * statistics, keyed by data direction. */
static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in;
	struct latency_cont *lat = NULL;
	struct zfcp_unit *unit = req->unit;
	struct zfcp_blk_drv_data blktrc;
	int ticks = req->adapter->timer_ticks;

	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = req->queue_req.qdio_inb_usage;
	blktrc.outb_usage = req->queue_req.qdio_outb_usage;

	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		/* reported latencies are scaled by the adapter's timer_ticks */
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_READ:
			lat = &unit->latencies.read;
			break;
		case FSF_DATADIR_WRITE:
			lat = &unit->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &unit->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&unit->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&unit->latencies.lock);
		}
	}

	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}
/* Completion handler for an FCP command that carried a SCSI command:
 * map the FSF/FCP response onto the scsi_cmnd and deliver it through
 * scsi_done, all under the adapter's abort_lock. */
static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
		&(req->qtcb->bottom.io.fcp_rsp);
	u32 sns_len;
	/* the FCP_RSP_INFO field follows directly after the fixed IU part */
	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
	unsigned long flags;

	/* hold abort_lock so an abort cannot race with command completion;
	 * see the comment before the unlock below */
	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		/* command pointer was cleared, presumably by an abort */
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
		set_host_byte(scpnt, DID_SOFT_ERROR);
		goto skip_fsfstatus;
	}

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	}

	set_msg_byte(scpnt, COMMAND_COMPLETE);

	scpnt->result |= fcp_rsp_iu->scsi_status;

	zfcp_fsf_req_trace(req, scpnt);

	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
		/* byte 3 of FCP_RSP_INFO is the response code */
		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
			set_host_byte(scpnt, DID_OK);
		else {
			set_host_byte(scpnt, DID_ERROR);
			goto skip_fsfstatus;
		}
	}

	if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
		/* copy no more sense data than response and buffer allow */
		sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
			  fcp_rsp_iu->fcp_rsp_len;
		sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
		sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
		memcpy(scpnt->sense_buffer,
		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
	}

	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
		scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
		/* underrun below the command's underflow limit is an error */
		if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
		    scpnt->underflow)
			set_host_byte(scpnt, DID_ERROR);
	}

skip_fsfstatus:
	if (scpnt->result != 0)
		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
	else if (scpnt->retries > 0)
		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
	else
		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);

	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}
  1940. static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
  1941. {
  1942. struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
  1943. &(req->qtcb->bottom.io.fcp_rsp);
  1944. char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
  1945. if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
  1946. (req->status & ZFCP_STATUS_FSFREQ_ERROR))
  1947. req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
  1948. }
/* Shared completion handler for FSF_QTCB_FCP_CMND requests (both SCSI
 * commands and task management functions): evaluate the FSF status,
 * trigger recovery as required, then delegate to the specific handler. */
static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_unit *unit;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	/* task management requests carry the unit in req->data,
	 * SCSI commands carry it in req->unit */
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
		unit = req->data;
	else
		unit = req->unit;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_unit(req, unit);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, unit 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)unit->fcp_lun,
			(unsigned long long)unit->port->wwpn);
		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
					  req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect CDB length %d, unit 0x%016Lx on "
			"port 0x%016Lx closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length,
			(unsigned long long)unit->fcp_lun,
			(unsigned long long)unit->port->wwpn);
		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
					  req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fssfch5", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_unit_boxed(unit, "fssfch6", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(unit->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
		zfcp_fsf_send_fcp_ctm_handler(req);
	else {
		zfcp_fsf_send_fcp_command_task_handler(req);
		req->unit = NULL;
		/* drops the reference taken in zfcp_fsf_send_fcp_command_task */
		put_device(&unit->sysfs_device);
	}
}
  2024. static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
  2025. {
  2026. u32 *fcp_dl_ptr;
  2027. /*
  2028. * fcp_dl_addr = start address of fcp_cmnd structure +
  2029. * size of fixed part + size of dynamically sized add_dcp_cdb field
  2030. * SEE FCP-2 documentation
  2031. */
  2032. fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
  2033. (fcp_cmd->add_fcp_cdb_length << 2));
  2034. *fcp_dl_ptr = fcp_dl;
  2035. }
/**
 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
 * @unit: unit where command is sent to
 * @scsi_cmnd: scsi command to be sent
 * Returns: 0 on success, -EBUSY when the unit is blocked, error otherwise
 */
int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
				   struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd_iu *fcp_cmnd_iu;
	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
	int real_bytes, retval = -EIO;
	struct zfcp_adapter *adapter = unit->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;

	if (unlikely(!(atomic_read(&unit->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock(&qdio->req_q_lock);
	if (atomic_read(&qdio->req_q.count) <= 0) {
		/* no free SBAL: account the stall instead of waiting */
		atomic_inc(&qdio->req_q_full);
		goto out;
	}
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  adapter->pool.scsi_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	/* hold a unit reference until the completion handler drops it */
	get_device(&unit->sysfs_device);
	req->unit = unit;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_send_fcp_command_handler;
	req->qtcb->header.lun_handle = unit->handle;
	req->qtcb->header.port_handle = unit->port->handle;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;

	/* remember the request id in the command for abort handling */
	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;

	/*
	 * set depending on data direction:
	 *      data direction bits in SBALE (SB Type)
	 *      data direction bits in QTCB
	 *      data direction bits in FCP_CMND IU
	 */
	switch (scsi_cmnd->sc_data_direction) {
	case DMA_NONE:
		req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
		break;
	case DMA_FROM_DEVICE:
		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
		fcp_cmnd_iu->rddata = 1;
		break;
	case DMA_TO_DEVICE:
		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
		sbtype = SBAL_FLAGS0_TYPE_WRITE;
		fcp_cmnd_iu->wddata = 1;
		break;
	case DMA_BIDIRECTIONAL:
		/* bidirectional transfers are not supported */
		goto failed_scsi_cmnd;
	}

	if (likely((scsi_cmnd->device->simple_tags) ||
		   ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
		    (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
		fcp_cmnd_iu->task_attribute = SIMPLE_Q;
	else
		fcp_cmnd_iu->task_attribute = UNTAGGED;

	/* CDB bytes beyond the fixed field go into the additional CDB part,
	 * whose length is counted in 4-byte words */
	if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
		fcp_cmnd_iu->add_fcp_cdb_length =
			(scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;

	memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);

	/* fixed part + additional CDB + trailing fcp_dl word */
	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
		fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);

	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
					     scsi_sglist(scsi_cmnd),
					     FSF_MAX_SBALS_PER_REQ);
	if (unlikely(real_bytes < 0)) {
		if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
			dev_err(&adapter->ccw_device->dev,
				"Oversize data package, unit 0x%016Lx "
				"on port 0x%016Lx closed\n",
				(unsigned long long)unit->fcp_lun,
				(unsigned long long)unit->port->wwpn);
			zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
			retval = -EINVAL;
		}
		goto failed_scsi_cmnd;
	}

	zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	goto out;

failed_scsi_cmnd:
	put_device(&unit->sysfs_device);
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock(&qdio->req_q_lock);
	return retval;
}
/**
 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
 * @unit: pointer to struct zfcp_unit
 * @tm_flags: unsigned byte for task management flags
 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
 */
struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd_iu *fcp_cmnd_iu;
	struct zfcp_qdio *qdio = unit->port->adapter->qdio;

	if (unlikely(!(atomic_read(&unit->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  qdio->adapter->pool.scsi_req);
	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	/* distinguishes this request from regular SCSI commands in the
	 * shared completion handler */
	req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
	req->data = unit;
	req->handler = zfcp_fsf_send_fcp_command_handler;
	req->qtcb->header.lun_handle = unit->handle;
	req->qtcb->header.port_handle = unit->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	/* fixed IU part plus the trailing fcp_dl word, no CDB payload */
	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
					       sizeof(u32);

	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
	fcp_cmnd_iu->task_management_flags = tm_flags;

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

	/* send failed: release the request and report failure via NULL */
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return req;
}
/* CFDC requests need no response post-processing here; the submitter
 * (zfcp_fsf_control_file) waits on req->completion and evaluates the
 * request itself. */
static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
{
}
  2188. /**
  2189. * zfcp_fsf_control_file - control file upload/download
  2190. * @adapter: pointer to struct zfcp_adapter
  2191. * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
  2192. * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
  2193. */
  2194. struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
  2195. struct zfcp_fsf_cfdc *fsf_cfdc)
  2196. {
  2197. struct qdio_buffer_element *sbale;
  2198. struct zfcp_qdio *qdio = adapter->qdio;
  2199. struct zfcp_fsf_req *req = NULL;
  2200. struct fsf_qtcb_bottom_support *bottom;
  2201. int direction, retval = -EIO, bytes;
  2202. if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
  2203. return ERR_PTR(-EOPNOTSUPP);
  2204. switch (fsf_cfdc->command) {
  2205. case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
  2206. direction = SBAL_FLAGS0_TYPE_WRITE;
  2207. break;
  2208. case FSF_QTCB_UPLOAD_CONTROL_FILE:
  2209. direction = SBAL_FLAGS0_TYPE_READ;
  2210. break;
  2211. default:
  2212. return ERR_PTR(-EINVAL);
  2213. }
  2214. spin_lock_bh(&qdio->req_q_lock);
  2215. if (zfcp_fsf_req_sbal_get(qdio))
  2216. goto out;
  2217. req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
  2218. if (IS_ERR(req)) {
  2219. retval = -EPERM;
  2220. goto out;
  2221. }
  2222. req->handler = zfcp_fsf_control_file_handler;
  2223. sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
  2224. sbale[0].flags |= direction;
  2225. bottom = &req->qtcb->bottom.support;
  2226. bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
  2227. bottom->option = fsf_cfdc->option;
  2228. bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
  2229. direction, fsf_cfdc->sg,
  2230. FSF_MAX_SBALS_PER_REQ);
  2231. if (bytes != ZFCP_CFDC_MAX_SIZE) {
  2232. zfcp_fsf_req_free(req);
  2233. goto out;
  2234. }
  2235. zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
  2236. retval = zfcp_fsf_req_send(req);
  2237. out:
  2238. spin_unlock_bh(&qdio->req_q_lock);
  2239. if (!retval) {
  2240. wait_for_completion(&req->completion);
  2241. return req;
  2242. }
  2243. return ERR_PTR(retval);
  2244. }
/**
 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @sbal_idx: response queue index of SBAL to be processed
 */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long flags, req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		/* the SBALE address field carries the originating req_id */
		req_id = (unsigned long) sbale->addr;
		spin_lock_irqsave(&adapter->req_list_lock, flags);
		fsf_req = zfcp_reqlist_find(adapter, req_id);

		if (!fsf_req)
			/*
			 * Unknown request means that we have potentially memory
			 * corruption and must stop the machine immediately.
			 */
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));

		list_del(&fsf_req->list);
		spin_unlock_irqrestore(&adapter->req_list_lock, flags);

		fsf_req->queue_req.sbal_response = sbal_idx;
		fsf_req->queue_req.qdio_inb_usage =
			atomic_read(&qdio->resp_q.count);
		zfcp_fsf_req_complete(fsf_req);

		/* stop after the last valid entry of this SBAL */
		if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
			break;
	}
}