dasd_eckd.c 108 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883
  1. /*
  2. * File...........: linux/drivers/s390/block/dasd_eckd.c
  3. * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  4. * Horst Hummel <Horst.Hummel@de.ibm.com>
  5. * Carsten Otte <Cotte@de.ibm.com>
  6. * Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. * Bugreports.to..: <Linux390@de.ibm.com>
  8. * Copyright IBM Corp. 1999, 2009
  9. * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
  10. * Author.........: Nigel Hislop <hislop_nigel@emc.com>
  11. */
  12. #define KMSG_COMPONENT "dasd-eckd"
  13. #include <linux/stddef.h>
  14. #include <linux/kernel.h>
  15. #include <linux/slab.h>
  16. #include <linux/hdreg.h> /* HDIO_GETGEO */
  17. #include <linux/bio.h>
  18. #include <linux/module.h>
  19. #include <linux/init.h>
  20. #include <asm/debug.h>
  21. #include <asm/idals.h>
  22. #include <asm/ebcdic.h>
  23. #include <asm/compat.h>
  24. #include <asm/io.h>
  25. #include <asm/uaccess.h>
  26. #include <asm/cio.h>
  27. #include <asm/ccwdev.h>
  28. #include <asm/itcw.h>
  29. #include "dasd_int.h"
  30. #include "dasd_eckd.h"
  31. #include "../cio/chsc.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * Shorthand accessors for the capacity/factor fields of the read device
 * characteristics data (presumably struct dasd_eckd_characteristics —
 * confirm against dasd_eckd.h).  Factors f1..f3 live in different union
 * members depending on which formula (0x01 or 0x02) the device reports;
 * f4/f5 exist only for formula 0x02 and read as 0 otherwise.
 */
#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe.  Entries pair a control unit type with a device
 * type (e.g. 3990 CU with 3390 volumes). */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

/* Result codes of the initial analysis request (users of these
 * constants are outside this chunk). */
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
/* emergency request for reserve/release; statically allocated so it is
 * available even under memory pressure */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
/* serializes use of the single dasd_reserve_req buffer */
static DEFINE_MUTEX(dasd_reserve_mutex);

/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	/* nonzero when this is the preallocated path_verification_worker
	 * instance below — presumably; confirm against the allocator,
	 * which is outside this chunk */
	int isglobal;
	/* path mask to verify — presumably "to be verified path mask";
	 * confirm against callers */
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);
  90. /* initial attempt at a probe function. this can be simplified once
  91. * the other detection code is gone */
  92. static int
  93. dasd_eckd_probe (struct ccw_device *cdev)
  94. {
  95. int ret;
  96. /* set ECKD specific ccw-device options */
  97. ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
  98. CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
  99. if (ret) {
  100. DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
  101. "dasd_eckd_probe: could not set "
  102. "ccw-device options");
  103. return ret;
  104. }
  105. ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
  106. return ret;
  107. }
/*
 * Set-online callback: bring the ccw device online using the ECKD
 * discipline via the generic DASD code.
 */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
/* Record sizes, in bytes, of the records on track 0 — presumably the
 * CDL label-track layout; confirm against the users of sizes_trk0,
 * which are outside this chunk. */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140	/* size of the volume label, in bytes */
  115. static inline unsigned int
  116. round_up_multiple(unsigned int no, unsigned int mult)
  117. {
  118. int rem = no % mult;
  119. return (rem ? no - rem + mult : no);
  120. }
  121. static inline unsigned int
  122. ceil_quot(unsigned int d1, unsigned int d2)
  123. {
  124. return (d1 + (d2 - 1)) / d2;
  125. }
/*
 * Number of records with key length kl and data length dl that fit on
 * one track of the device described by rdc.
 *
 * The constants (1499/1729/1420, the per-record overheads, and the
 * divisors 32/34/232) are the track-capacity formula parameters of the
 * respective device types (3380, 3390, 9345) — presumably taken from
 * the hardware reference; confirm against the IBM DASD capacity
 * documentation before changing any of them.
 * Returns 0 for an unknown device type.
 */
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
  157. static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
  158. {
  159. geo->cyl = (__u16) cyl;
  160. geo->head = cyl >> 16;
  161. geo->head <<= 4;
  162. geo->head |= head;
  163. }
/*
 * Enable XRC (extended remote copy) time stamping in define extent
 * data when the device reports XRC support.
 *
 * Sets the 'Time Stamp Valid' and 'Extended Parameter' bits, fills in
 * the system time stamp, and widens the DE CCW to the full
 * DE_eckd_data length (the time stamp sits at the end of the payload —
 * presumably; confirm against struct DE_eckd_data).  CCW_FLAG_SLI
 * suppresses incorrect-length indication for the longer transfer.
 *
 * Returns 0 when XRC is unsupported or the stamp was set (a switched
 * off sync clock, -ENOSYS/-EACCES, is deliberately ignored); otherwise
 * the error from get_sync_clock().
 */
static int
check_XRC (struct ccw1 *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}
/*
 * Build a Define Extent CCW and its payload covering the track range
 * trk..totrk for the given channel command.
 *
 * Read commands get read permission only; writes get write permission
 * and may enable XRC time stamping via check_XRC(); format/erase type
 * commands bypass the cache and get full permission plus authorization.
 * Note that an unknown cmd is only reported via dev_err() — rc stays 0
 * in that case.
 *
 * Returns 0 on success or the error from check_XRC().
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	/* set up the CCW to transfer the payload built below */
	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;	/* read only */
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;	/* write */
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;	/* read + write */
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/* Regular Data Format Mode only on the listed control unit
	 * types, and never on tracks 0/1 of a CDL-formatted volume */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
/*
 * Same as check_XRC(), but for the define extent data embedded in a
 * PFX (prefix) payload: enable time stamping when the device supports
 * XRC and mark the time stamp valid in the prefix validity flags.
 * No CCW adjustment is needed here — the PFX CCW already transfers the
 * full payload.
 *
 * Returns 0, or the error from get_sync_clock() (a switched off sync
 * clock, -ENOSYS/-EACCES, is deliberately ignored).
 */
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}
/*
 * Fill a Locate Record Extended payload for an I/O starting at record
 * rec_on_trk of track trk.
 *
 * count is the number of records for record based I/O, but the number
 * of tracks for track based I/O (see the in-line comment below).
 * reclen and tlf supply transfer lengths where the command needs them
 * (tlf only for READ_TRACK_DATA).  BUG()s on an unknown cmd.
 */
static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;
	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/* estimate the sector the record starts in, from the
		 * device type specific record layout parameters —
		 * presumably the hardware's sector-locate formula;
		 * confirm against the DASD hardware reference */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			    "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	/* seek to the start track; search argument addresses the
	 * starting record on that track */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Build a PFX (prefix) CCW whose payload combines define extent data
 * and, for format == 1, locate record extended data.
 *
 * basedev supplies geometry and feature information; startdev is the
 * device the I/O is actually started on and may be a (Hyper)PAV alias,
 * in which case the base-verification validity bits are set.
 * format: 0 = define extent only, 1 = define extent + LRE; any other
 * value BUG()s.  rec_on_trk/count/blksize/tlf are passed through to
 * fill_LRE_data() for format 1.
 *
 * Returns 0 on success, -EINVAL for a bad format or cmd (only reached
 * when BUG() is compiled out), or the error from check_XRC_on_prefix().
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extend data (mostly)*/
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;	/* read only */
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;	/* write */
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;	/* read + write */
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			    "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	/* Regular Data Format Mode only on the listed control unit
	 * types, and never on tracks 0/1 of a CDL-formatted volume */
	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	if (format == 1) {
		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
			      basedev, blksize, tlf);
	}

	return rc;
}
/*
 * Build a plain PFX (Prefix) command for the given track range.
 *
 * Convenience wrapper around prefix_LRE() with format 0 and all
 * locate-record-extended parameters (blksize, rec_on_trk, count, tlf)
 * set to zero, i.e. only the define-extent part is filled in.
 * Returns the result of prefix_LRE() (0 or a negative errno).
 */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
/*
 * Fill a Locate Record CCW and its LO_eckd_data payload.
 *
 * @ccw:        CCW to initialize (cmd code, count, data address)
 * @data:       locate record parameter block, zeroed and filled here
 * @trk:        absolute track number (cylinder * heads + head)
 * @rec_on_trk: record number on the track; 0 means "start of track"
 * @no_rec:     number of records to operate on
 * @cmd:        the ECKD CCW opcode that will follow this locate record
 * @device:     device the request is built for
 * @reclen:     record length, used for the sector estimate and data length
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	/* the locate record parameter block is always 16 bytes */
	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/*
		 * Estimate the angular sector position of the record from
		 * its number and length.  The constants are device-geometry
		 * specific (presumably from the 3390/3380 hardware reference
		 * - TODO confirm against the storage-subsystem manuals).
		 */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	/* orientation/operation codes depend on the command that follows */
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is an extra record */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		/* record zero itself is an extra record */
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		/* unknown opcode: leave the zeroed operation fields as-is */
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	/* convert the absolute track number into cylinder/head */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
  610. /*
  611. * Returns 1 if the block is one of the special blocks that needs
  612. * to get read/written with the KD variant of the command.
  613. * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
  614. * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
  615. * Luckily the KD variants differ only by one bit (0x08) from the
  616. * normal variant. So don't wonder about code like:
  617. * if (dasd_eckd_cdl_special(blk_per_trk, recid))
  618. * ccw->cmd_code |= 0x8;
  619. */
  620. static inline int
  621. dasd_eckd_cdl_special(int blk_per_trk, int recid)
  622. {
  623. if (recid < 3)
  624. return 1;
  625. if (recid < blk_per_trk)
  626. return 0;
  627. if (recid < 2 * blk_per_trk)
  628. return 1;
  629. return 0;
  630. }
  631. /*
  632. * Returns the record size for the special blocks of the cdl format.
  633. * Only returns something useful if dasd_eckd_cdl_special is true
  634. * for the recid.
  635. */
  636. static inline int
  637. dasd_eckd_cdl_reclen(int recid)
  638. {
  639. if (recid < 3)
  640. return sizes_trk0[recid];
  641. return LABEL_SIZE;
  642. }
  643. /*
  644. * Generate device unique id that specifies the physical device.
  645. */
/*
 * Generate device unique id that specifies the physical device.
 *
 * Fills private->uid from the NED/GNEQ/SNEQ configuration records under
 * the ccw device lock.  Returns 0 on success, -ENODEV when the private
 * data or the required configuration records are missing.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_uid *uid;
	int count;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	/* both the node element and the general NEQ are mandatory */
	if (!private->ned || !private->gneq)
		return -ENODEV;
	uid = &private->uid;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memset(uid, 0, sizeof(struct dasd_uid));
	/* vendor and serial arrive in EBCDIC and are converted to ASCII */
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		/* no special NEQ: plain base device */
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		/* render the 16-byte virtual device token as hex */
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}
  684. static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
  685. {
  686. struct dasd_eckd_private *private;
  687. unsigned long flags;
  688. if (device->private) {
  689. private = (struct dasd_eckd_private *)device->private;
  690. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  691. *uid = private->uid;
  692. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  693. return 0;
  694. }
  695. return -EINVAL;
  696. }
/*
 * Initialize a Read Configuration Data request.
 *
 * Sets up the single RCD CCW pointing at @rcd_buffer and fills the
 * common cqr fields; the request is restricted to the path mask @lpm
 * and marked as a path-verification request.
 */
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;

	cqr->magic = DASD_ECKD_MAGIC;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;		/* run only on the requested path */
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
  727. static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
  728. struct dasd_ccw_req *cqr,
  729. __u8 *rcd_buffer,
  730. __u8 lpm)
  731. {
  732. struct ciw *ciw;
  733. int rc;
  734. /*
  735. * sanity check: scan for RCD command in extended SenseID data
  736. * some devices do not support RCD
  737. */
  738. ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
  739. if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
  740. return -EOPNOTSUPP;
  741. dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
  742. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  743. cqr->retries = 5;
  744. rc = dasd_sleep_on_immediatly(cqr);
  745. return rc;
  746. }
/*
 * Allocate a buffer and read the configuration data on path @lpm.
 *
 * On success *rcd_buffer points to a kzalloc'ed buffer of
 * DASD_ECKD_RCD_DATA_SIZE bytes that the caller must free; on any
 * failure *rcd_buffer is NULL, *rcd_buffer_size is 0 and a negative
 * errno is returned (-EOPNOTSUPP if the device does not support RCD).
 */
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	/* DMA-capable buffer for the channel program */
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	/* kfree(NULL) is a no-op, so this covers the early exits too */
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
  795. static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
  796. {
  797. struct dasd_sneq *sneq;
  798. int i, count;
  799. private->ned = NULL;
  800. private->sneq = NULL;
  801. private->vdsneq = NULL;
  802. private->gneq = NULL;
  803. count = private->conf_len / sizeof(struct dasd_sneq);
  804. sneq = (struct dasd_sneq *)private->conf_data;
  805. for (i = 0; i < count; ++i) {
  806. if (sneq->flags.identifier == 1 && sneq->format == 1)
  807. private->sneq = sneq;
  808. else if (sneq->flags.identifier == 1 && sneq->format == 4)
  809. private->vdsneq = (struct vd_sneq *)sneq;
  810. else if (sneq->flags.identifier == 2)
  811. private->gneq = (struct dasd_gneq *)sneq;
  812. else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
  813. private->ned = (struct dasd_ned *)sneq;
  814. sneq++;
  815. }
  816. if (!private->ned || !private->gneq) {
  817. private->ned = NULL;
  818. private->sneq = NULL;
  819. private->vdsneq = NULL;
  820. private->gneq = NULL;
  821. return -EINVAL;
  822. }
  823. return 0;
  824. };
  825. static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
  826. {
  827. struct dasd_gneq *gneq;
  828. int i, count, found;
  829. count = conf_len / sizeof(*gneq);
  830. gneq = (struct dasd_gneq *)conf_data;
  831. found = 0;
  832. for (i = 0; i < count; ++i) {
  833. if (gneq->flags.identifier == 2) {
  834. found = 1;
  835. break;
  836. }
  837. gneq++;
  838. }
  839. if (found)
  840. return ((char *)gneq)[18] & 0x07;
  841. else
  842. return 0;
  843. }
  844. static int dasd_eckd_read_conf(struct dasd_device *device)
  845. {
  846. void *conf_data;
  847. int conf_len, conf_data_saved;
  848. int rc;
  849. __u8 lpm, opm;
  850. struct dasd_eckd_private *private;
  851. struct dasd_path *path_data;
  852. private = (struct dasd_eckd_private *) device->private;
  853. path_data = &device->path_data;
  854. opm = ccw_device_get_path_mask(device->cdev);
  855. lpm = 0x80;
  856. conf_data_saved = 0;
  857. /* get configuration data per operational path */
  858. for (lpm = 0x80; lpm; lpm>>= 1) {
  859. if (lpm & opm) {
  860. rc = dasd_eckd_read_conf_lpm(device, &conf_data,
  861. &conf_len, lpm);
  862. if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
  863. DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
  864. "Read configuration data returned "
  865. "error %d", rc);
  866. return rc;
  867. }
  868. if (conf_data == NULL) {
  869. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
  870. "No configuration data "
  871. "retrieved");
  872. /* no further analysis possible */
  873. path_data->opm |= lpm;
  874. continue; /* no error */
  875. }
  876. /* save first valid configuration data */
  877. if (!conf_data_saved) {
  878. kfree(private->conf_data);
  879. private->conf_data = conf_data;
  880. private->conf_len = conf_len;
  881. if (dasd_eckd_identify_conf_parts(private)) {
  882. private->conf_data = NULL;
  883. private->conf_len = 0;
  884. kfree(conf_data);
  885. continue;
  886. }
  887. conf_data_saved++;
  888. }
  889. switch (dasd_eckd_path_access(conf_data, conf_len)) {
  890. case 0x02:
  891. path_data->npm |= lpm;
  892. break;
  893. case 0x03:
  894. path_data->ppm |= lpm;
  895. break;
  896. }
  897. path_data->opm |= lpm;
  898. if (conf_data != private->conf_data)
  899. kfree(conf_data);
  900. }
  901. }
  902. return 0;
  903. }
  904. static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
  905. {
  906. struct dasd_eckd_private *private;
  907. int mdc;
  908. u32 fcx_max_data;
  909. private = (struct dasd_eckd_private *) device->private;
  910. if (private->fcx_max_data) {
  911. mdc = ccw_device_get_mdc(device->cdev, lpm);
  912. if ((mdc < 0)) {
  913. dev_warn(&device->cdev->dev,
  914. "Detecting the maximum data size for zHPF "
  915. "requests failed (rc=%d) for a new path %x\n",
  916. mdc, lpm);
  917. return mdc;
  918. }
  919. fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
  920. if (fcx_max_data < private->fcx_max_data) {
  921. dev_warn(&device->cdev->dev,
  922. "The maximum data size for zHPF requests %u "
  923. "on a new path %x is below the active maximum "
  924. "%u\n", fcx_max_data, lpm,
  925. private->fcx_max_data);
  926. return -EACCES;
  927. }
  928. }
  929. return 0;
  930. }
/*
 * Work function that verifies the paths flagged in data->tbvpm.
 *
 * For every path to be verified it reads the configuration data on
 * exactly that path and classifies the result into operational (opm),
 * non-preferred (npm), preferred (ppm) or to-be-retried (epm) masks,
 * which are then merged into the device's path data under the ccw
 * device lock.  Frees @data (or releases the global fallback worker)
 * and drops the device reference taken by dasd_eckd_verify_path().
 */
static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	__u8 lpm, opm, npm, ppm, epm;
	unsigned long flags;
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (lpm & data->tbvpm) {
			/* reuse the embedded buffer/cqr for each path */
			memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
			memset(&data->cqr, 0, sizeof(data->cqr));
			data->cqr.cpaddr = &data->ccw;
			rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
							     data->rcd_buffer,
							     lpm);
			if (!rc) {
				switch (dasd_eckd_path_access(data->rcd_buffer,
							      DASD_ECKD_RCD_DATA_SIZE)) {
				case 0x02:
					npm |= lpm;
					break;
				case 0x03:
					ppm |= lpm;
					break;
				}
				opm |= lpm;
			} else if (rc == -EOPNOTSUPP) {
				/* no RCD support: path is usable anyway */
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
						"path verification: No configuration "
						"data retrieved");
				opm |= lpm;
			} else if (rc == -EAGAIN) {
				/* device stopped: verify this path later */
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
						"path verification: device is stopped,"
						" try again later");
				epm |= lpm;
			} else {
				dev_warn(&device->cdev->dev,
					 "Reading device feature codes failed "
					 "(rc=%d) for new path %x\n", rc, lpm);
				continue;
			}
			/* drop the path again if its zHPF limit is too low */
			if (verify_fcx_max_data(device, lpm)) {
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
			}
		}
	}
	/*
	 * There is a small chance that a path is lost again between
	 * above path verification and the following modification of
	 * the device opm mask. We could avoid that race here by using
	 * yet another path mask, but we rather deal with this unlikely
	 * situation in dasd_start_IO.
	 */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (!device->path_data.opm && opm) {
		/* device regains its first operational path */
		device->path_data.opm = opm;
		dasd_generic_path_operational(device);
	} else
		device->path_data.opm |= opm;
	device->path_data.npm |= npm;
	device->path_data.ppm |= ppm;
	device->path_data.tbvpm |= epm;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}
  1009. static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
  1010. {
  1011. struct path_verification_work_data *data;
  1012. data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
  1013. if (!data) {
  1014. if (mutex_trylock(&dasd_path_verification_mutex)) {
  1015. data = path_verification_worker;
  1016. data->isglobal = 1;
  1017. } else
  1018. return -ENOMEM;
  1019. } else {
  1020. memset(data, 0, sizeof(*data));
  1021. data->isglobal = 0;
  1022. }
  1023. INIT_WORK(&data->worker, do_path_verification_work);
  1024. dasd_get_device(device);
  1025. data->device = device;
  1026. data->tbvpm = lpm;
  1027. schedule_work(&data->worker);
  1028. return 0;
  1029. }
/*
 * Read the device feature codes via PSF/RSSD and cache them in
 * private->features.
 *
 * Builds a two-CCW chain: a Perform Subsystem Function "Prepare for
 * Read Subsystem Data" order followed by the Read Subsystem Data
 * itself.  On failure the feature data stays zeroed and the error is
 * only logged.  Returns the result of dasd_sleep_on().
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		/* copy the returned feature codes into the private data */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
  1086. /*
  1087. * Build CP for Perform Subsystem Function - SSC.
  1088. */
/*
 * Build CP for Perform Subsystem Function - SSC.
 *
 * Allocates and fills a single-CCW request carrying the SSC order;
 * @enable_pav additionally sets the PAV suborder bit and its reserved
 * flag byte.  Returns the filled request or an ERR_PTR on allocation
 * failure; the caller owns and must free the request.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;	/* request PAV */
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;	/* size of the SSC order data */

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
  1123. /*
  1124. * Perform Subsystem Function.
  1125. * It is necessary to trigger CIO for channel revalidation since this
  1126. * call might change behaviour of DASD devices.
  1127. */
  1128. static int
  1129. dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
  1130. {
  1131. struct dasd_ccw_req *cqr;
  1132. int rc;
  1133. cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
  1134. if (IS_ERR(cqr))
  1135. return PTR_ERR(cqr);
  1136. rc = dasd_sleep_on(cqr);
  1137. if (!rc)
  1138. /* trigger CIO to reprobe devices */
  1139. css_schedule_reprobe();
  1140. dasd_sfree_request(cqr, cqr->memdev);
  1141. return rc;
  1142. }
/*
 * Validate storage server of current device.
 */
  1146. static void dasd_eckd_validate_server(struct dasd_device *device)
  1147. {
  1148. int rc;
  1149. struct dasd_eckd_private *private;
  1150. int enable_pav;
  1151. if (dasd_nopav || MACHINE_IS_VM)
  1152. enable_pav = 0;
  1153. else
  1154. enable_pav = 1;
  1155. rc = dasd_eckd_psf_ssc(device, enable_pav);
  1156. /* may be requested feature is not available on server,
  1157. * therefore just report error and go ahead */
  1158. private = (struct dasd_eckd_private *) device->private;
  1159. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
  1160. "returned rc=%d", private->uid.ssid, rc);
  1161. }
/*
 * Determine the maximum data size for zHPF (transport mode) requests.
 *
 * Transport mode is used only when the channel subsystem, the device's
 * general NEQ and the feature codes all advertise fcx support and the
 * dasd_nofcx parameter is not set.  Returns the size derived from the
 * maximum data count (mdc * FCX_MAX_DATA_FACTOR), or 0 when transport
 * mode is unavailable.  On 31-bit builds this is always 0.
 */
static u32 get_fcx_max_data(struct dasd_device *device)
{
#if defined(CONFIG_64BIT)
	int tpm, mdc;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	struct dasd_eckd_private *private;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	private = (struct dasd_eckd_private *) device->private;
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
			 " data size for zHPF requests failed\n");
		return 0;
	} else
		return mdc * FCX_MAX_DATA_FACTOR;
#else
	return 0;
#endif
}
  1189. /*
  1190. * Check device characteristics.
  1191. * If the device is accessible using ECKD discipline, the device is enabled.
  1192. */
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 *
 * Full bring-up sequence for an ECKD device: allocate private data,
 * read configuration data, derive the unique id, allocate the block
 * device (base devices only), register with the alias/LCU handling,
 * validate the storage server, re-read configuration data, read the
 * feature codes and the device characteristics.  Returns 0 on success
 * or a negative errno after unwinding whatever was set up.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int is_known, rc, i;
	int readonly;
	unsigned long value;

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	private = (struct dasd_eckd_private *) device->private;
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		/* reuse the existing private data, but start clean */
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set default timeout */
	device->default_expires = DASD_EXPIRES;
	if (private->gneq) {
		/* timeout is encoded as number * 10^value */
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err1;

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		/* only base devices get a block layer device */
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	/*
	 * dasd_eckd_validate_server is done on the first device that
	 * is found for an LCU. All later other devices have to wait
	 * for it, so they will read the correct feature codes.
	 */
	if (!is_known) {
		dasd_eckd_validate_server(device);
		dasd_alias_lcu_setup_complete(device);
	} else
		dasd_alias_wait_for_lcu_setup(device);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}
	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
  1323. static void dasd_eckd_uncheck_device(struct dasd_device *device)
  1324. {
  1325. struct dasd_eckd_private *private;
  1326. private = (struct dasd_eckd_private *) device->private;
  1327. dasd_alias_disconnect_device_from_lcu(device);
  1328. private->ned = NULL;
  1329. private->sneq = NULL;
  1330. private->vdsneq = NULL;
  1331. private->gneq = NULL;
  1332. private->conf_len = 0;
  1333. kfree(private->conf_data);
  1334. private->conf_data = NULL;
  1335. }
/*
 * Build the initial-analysis channel program.
 *
 * Creates a command-chained request that defines an extent over the
 * first three tracks, then reads the count fields of the first four
 * records on track 0 and the first record on track 2 into
 * private->count_area.  The results are later evaluated to decide the
 * disk layout.  Returns the request or an ERR_PTR on allocation
 * failure.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	/* 1 define extent + 2 locate record + 5 read count CCWs */
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	/* one read count CCW per record, chained together */
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
  1389. /* differentiate between 'no record found' and any other error */
  1390. static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
  1391. {
  1392. char *sense;
  1393. if (init_cqr->status == DASD_CQR_DONE)
  1394. return INIT_CQR_OK;
  1395. else if (init_cqr->status == DASD_CQR_NEED_ERP ||
  1396. init_cqr->status == DASD_CQR_FAILED) {
  1397. sense = dasd_get_sense(&init_cqr->irb);
  1398. if (sense && (sense[1] & SNS1_NO_REC_FOUND))
  1399. return INIT_CQR_UNFORMATTED;
  1400. else
  1401. return INIT_CQR_ERROR;
  1402. } else
  1403. return INIT_CQR_ERROR;
  1404. }
  1405. /*
  1406. * This is the callback function for the init_analysis cqr. It saves
  1407. * the status of the initial analysis ccw before it frees it and kicks
  1408. * the device to continue the startup sequence. This will call
  1409. * dasd_eckd_do_analysis again (if the devices has not been marked
  1410. * for deletion in the meantime).
  1411. */
  1412. static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
  1413. void *data)
  1414. {
  1415. struct dasd_eckd_private *private;
  1416. struct dasd_device *device;
  1417. device = init_cqr->startdev;
  1418. private = (struct dasd_eckd_private *) device->private;
  1419. private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
  1420. dasd_sfree_request(init_cqr, device);
  1421. dasd_kick_device(device);
  1422. }
  1423. static int dasd_eckd_start_analysis(struct dasd_block *block)
  1424. {
  1425. struct dasd_eckd_private *private;
  1426. struct dasd_ccw_req *init_cqr;
  1427. private = (struct dasd_eckd_private *) block->base->private;
  1428. init_cqr = dasd_eckd_analysis_ccw(block->base);
  1429. if (IS_ERR(init_cqr))
  1430. return PTR_ERR(init_cqr);
  1431. init_cqr->callback = dasd_eckd_analysis_callback;
  1432. init_cqr->callback_data = NULL;
  1433. init_cqr->expires = 5*HZ;
  1434. /* first try without ERP, so we can later handle unformatted
  1435. * devices as special case
  1436. */
  1437. clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
  1438. init_cqr->retries = 0;
  1439. dasd_add_request_head(init_cqr);
  1440. return -EAGAIN;
  1441. }
/*
 * Evaluate the count data gathered by the analysis channel program and
 * derive the block layout of the volume.
 *
 * Consumes private->init_cqr_status (resetting it to -1).  On an
 * earlier error the analysis is retried synchronously, this time with
 * full ERP.  Decides between compatible disk layout (CDL) and the
 * plain linux disk layout, sets block->bp_block, s2b_shift and the
 * total block count.
 *
 * Returns 0 on success, -EMEDIUMTYPE for unformatted/unsupported
 * layouts, -EIO on I/O errors during analysis.
 */
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		/* NOTE(review): init_cqr is not checked with IS_ERR()
		 * before dasd_sleep_on() — confirm the callees tolerate
		 * an ERR_PTR here. */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}
	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		/* CDL records 0-2 on track 0 carry a 4 byte key and
		 * a fixed, record-dependent data length */
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		/* CDL detected: use the count of track 2's first record */
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		/* Linux disk layout: all five sampled records must have
		 * no key and a uniform data length */
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	block->blocks = (private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 ((private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");
	return 0;
}
  1524. static int dasd_eckd_do_analysis(struct dasd_block *block)
  1525. {
  1526. struct dasd_eckd_private *private;
  1527. private = (struct dasd_eckd_private *) block->base->private;
  1528. if (private->init_cqr_status < 0)
  1529. return dasd_eckd_start_analysis(block);
  1530. else
  1531. return dasd_eckd_end_analysis(block);
  1532. }
  1533. static int dasd_eckd_ready_to_online(struct dasd_device *device)
  1534. {
  1535. return dasd_alias_add_device(device);
  1536. };
  1537. static int dasd_eckd_online_to_ready(struct dasd_device *device)
  1538. {
  1539. cancel_work_sync(&device->reload_device);
  1540. return dasd_alias_remove_device(device);
  1541. };
  1542. static int
  1543. dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
  1544. {
  1545. struct dasd_eckd_private *private;
  1546. private = (struct dasd_eckd_private *) block->base->private;
  1547. if (dasd_check_blocksize(block->bp_block) == 0) {
  1548. geo->sectors = recs_per_track(&private->rdc_data,
  1549. 0, block->bp_block);
  1550. }
  1551. geo->cylinders = private->rdc_data.no_cyl;
  1552. geo->heads = private->rdc_data.trk_per_cyl;
  1553. return 0;
  1554. }
/*
 * Build a channel program that formats the track described by @fdata
 * (start_unit, blksize, intensity).  The request is returned filled
 * but not started; the caller submits and frees it.
 *
 * Returns the request or ERR_PTR(-EINVAL) for out-of-range tracks,
 * bad block sizes or unsupported intensity combinations; allocation
 * errors are propagated from dasd_smalloc_request().
 */
static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
			struct format_data_t * fdata)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ccw1 *ccw;
	void *data;
	int rpt;			/* records per track at fdata->blksize */
	struct ch_t address;		/* cylinder/head of the start track */
	int cplength, datasize;
	int i;
	int intensity = 0;		/* fdata->intensity without bit 4 */
	int r0_perm;			/* allow subsystem to modify record 0 */

	private = (struct dasd_eckd_private *) device->private;
	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
	set_ch_t(&address,
		 fdata->start_unit / private->rdc_data.trk_per_cyl,
		 fdata->start_unit % private->rdc_data.trk_per_cyl);

	/* Sanity checks. */
	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&device->cdev->dev, "Start track number %d used in "
			 "formatting is too big\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&device->cdev->dev, "Start track %d used in "
			 "formatting exceeds end track\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&device->cdev->dev,
			 "The DASD cannot be formatted with block size %d\n",
			 fdata->blksize);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}
	/* size the channel program for the chosen operation */
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&device->cdev->dev, "An I/O control call used "
			 "incorrect flags 0x%x\n", fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;
	/* build define extent + locate record (cdl bit is irrelevant here) */
	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		/* grant subsystem permission to format R0 */
		if (r0_perm)
			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 0;
		ect->kl = 0;
		ect->dl = 8;	/* R0 carries an 8 byte data area */
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((intensity & ~0x08) & 0x04) {	/* erase track */
		/* a single record with zero data length invalidates it */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 1;
		ect->kl = 0;
		ect->dl = 0;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
	} else {		/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = i + 1;
			ect->kl = 0;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3) {
					ect->kl = 4;
					ect->dl = sizes_trk0[i] - 4;
				}
			}
			if ((intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->kl = 44;
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->startdev = device;
	fcp->memdev = device;
	fcp->retries = 256;
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}
  1749. static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
  1750. {
  1751. cqr->status = DASD_CQR_FILLED;
  1752. if (cqr->block && (cqr->startdev != cqr->block->base)) {
  1753. dasd_eckd_reset_ccw_to_base_io(cqr);
  1754. cqr->startdev = cqr->block->base;
  1755. cqr->lpm = cqr->block->base->path_data.opm;
  1756. }
  1757. };
  1758. static dasd_erp_fn_t
  1759. dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
  1760. {
  1761. struct dasd_device *device = (struct dasd_device *) cqr->startdev;
  1762. struct ccw_device *cdev = device->cdev;
  1763. switch (cdev->id.cu_type) {
  1764. case 0x3990:
  1765. case 0x2105:
  1766. case 0x2107:
  1767. case 0x1750:
  1768. return dasd_3990_erp_action;
  1769. case 0x9343:
  1770. case 0x3880:
  1771. default:
  1772. return dasd_default_erp_action;
  1773. }
  1774. }
  1775. static dasd_erp_fn_t
  1776. dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
  1777. {
  1778. return dasd_default_erp_postaction;
  1779. }
  1780. static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
  1781. struct irb *irb)
  1782. {
  1783. char mask;
  1784. char *sense = NULL;
  1785. struct dasd_eckd_private *private;
  1786. private = (struct dasd_eckd_private *) device->private;
  1787. /* first of all check for state change pending interrupt */
  1788. mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
  1789. if ((scsw_dstat(&irb->scsw) & mask) == mask) {
  1790. /* for alias only and not in offline processing*/
  1791. if (!device->block && private->lcu &&
  1792. !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
  1793. /*
  1794. * the state change could be caused by an alias
  1795. * reassignment remove device from alias handling
  1796. * to prevent new requests from being scheduled on
  1797. * the wrong alias device
  1798. */
  1799. dasd_alias_remove_device(device);
  1800. /* schedule worker to reload device */
  1801. dasd_reload_device(device);
  1802. }
  1803. dasd_generic_handle_state_change(device);
  1804. return;
  1805. }
  1806. /* summary unit check */
  1807. sense = dasd_get_sense(irb);
  1808. if (sense && (sense[7] == 0x0D) &&
  1809. (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
  1810. dasd_alias_handle_summary_unit_check(device, irb);
  1811. return;
  1812. }
  1813. /* service information message SIM */
  1814. if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
  1815. ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
  1816. dasd_3990_erp_handle_sim(device, sense);
  1817. dasd_schedule_device_bh(device);
  1818. return;
  1819. }
  1820. if ((scsw_cc(&irb->scsw) == 1) && !sense &&
  1821. (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) &&
  1822. (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) &&
  1823. (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) {
  1824. /* fake irb do nothing, they are handled elsewhere */
  1825. dasd_schedule_device_bh(device);
  1826. return;
  1827. }
  1828. dasd_schedule_device_bh(device);
  1829. return;
  1830. };
/*
 * Build a command-mode channel program for @req with one read/write
 * CCW per block.  The cqr data area holds, in order: the prefix (or
 * define extent) parameter block, @cidaw indirect addressing words,
 * and the locate record parameter blocks.
 *
 * Returns the filled request, ERR_PTR(-EINVAL) on malformed requests,
 * ERR_PTR(-EAGAIN) when the clock is not in sync while XRC is enabled,
 * or the allocation error from dasd_smalloc_request().
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		/* idaws only needed for data above 2GB on 64 bit */
		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len >> (block->s2b_shift + 9);
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			/* optionally copy write data through the page cache */
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					/* pad a short cdl read with 0xe5 */
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = startdev->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Build a command-mode channel program for @req with one read/write
 * CCW per *track* (READ/WRITE TRACK DATA).  Each track CCW addresses
 * its data through an idal; idaws are cut so that all but the first
 * and last address full IDA blocks and no idal crosses a track
 * boundary.
 *
 * Returns the filled request, ERR_PTR(-EINVAL) for unsupported request
 * directions, ERR_PTR(-ERANGE) when the segment layout cannot be
 * mapped to idaws, ERR_PTR(-EAGAIN) when the clock is not in sync
 * while XRC is enabled, or the allocation error from
 * dasd_smalloc_request().
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	/* on 31-bit we need space for two 32 bit addresses per page
	 * on 64-bit one 64 bit address
	 */
	datasize = sizeof(struct PFX_eckd_data) +
		cidaw * sizeof(unsigned long long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = 0;	/* NOTE(review): 0 used as null pointer constant */
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		seg_len = bv->bv_len;
		while (seg_len) {
			if (new_track) {
				/* start a fresh track CCW pointing at the
				 * idaws that will be created below */
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = 0;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = startdev->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Prepare the PFX (prefix with imbedded locate record) parameter
 * block for a transport-mode (TCW/ITCW) track read or write and add
 * it as the first DCW of @itcw.
 *
 * @rec_on_trk, when non-zero, is used to compute the rotational
 * sector for set-sector positioning based on the device type.
 *
 * Returns 0 on success or the error from check_XRC_on_prefix() for
 * track writes (clock not in sync while XRC is enabled).
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;

	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata.validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		/* writes need an XRC timestamp check */
		rc = check_XRC_on_prefix(&pfxdata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/* device-type specific rotational sector calculation */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	lredata->auxiliary.length_valid = 1;
	lredata->auxiliary.length_scope = 1;
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	lredata->sector = sector;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	/* NOTE(review): the returned dcw is stored but never checked
	 * for error; rc from the XRC check is returned instead —
	 * confirm this is intended. */
	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);

	return rc;
}
  2295. static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
  2296. struct dasd_device *startdev,
  2297. struct dasd_block *block,
  2298. struct request *req,
  2299. sector_t first_rec,
  2300. sector_t last_rec,
  2301. sector_t first_trk,
  2302. sector_t last_trk,
  2303. unsigned int first_offs,
  2304. unsigned int last_offs,
  2305. unsigned int blk_per_trk,
  2306. unsigned int blksize)
  2307. {
  2308. struct dasd_eckd_private *private;
  2309. struct dasd_ccw_req *cqr;
  2310. struct req_iterator iter;
  2311. struct bio_vec *bv;
  2312. char *dst;
  2313. unsigned int trkcount, ctidaw;
  2314. unsigned char cmd;
  2315. struct dasd_device *basedev;
  2316. unsigned int tlf;
  2317. struct itcw *itcw;
  2318. struct tidaw *last_tidaw = NULL;
  2319. int itcw_op;
  2320. size_t itcw_size;
  2321. u8 tidaw_flags;
  2322. unsigned int seg_len, part_len, len_to_track_end;
  2323. unsigned char new_track;
  2324. sector_t recid, trkid;
  2325. unsigned int offs;
  2326. unsigned int count, count_to_trk_end;
  2327. basedev = block->base;
  2328. private = (struct dasd_eckd_private *) basedev->private;
  2329. if (rq_data_dir(req) == READ) {
  2330. cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
  2331. itcw_op = ITCW_OP_READ;
  2332. } else if (rq_data_dir(req) == WRITE) {
  2333. cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
  2334. itcw_op = ITCW_OP_WRITE;
  2335. } else
  2336. return ERR_PTR(-EINVAL);
  2337. /* trackbased I/O needs address all memory via TIDAWs,
  2338. * not just for 64 bit addresses. This allows us to map
  2339. * each segment directly to one tidaw.
  2340. * In the case of write requests, additional tidaws may
  2341. * be needed when a segment crosses a track boundary.
  2342. */
  2343. trkcount = last_trk - first_trk + 1;
  2344. ctidaw = 0;
  2345. rq_for_each_segment(bv, req, iter) {
  2346. ++ctidaw;
  2347. }
  2348. if (rq_data_dir(req) == WRITE)
  2349. ctidaw += (last_trk - first_trk);
  2350. /* Allocate the ccw request. */
  2351. itcw_size = itcw_calc_size(0, ctidaw, 0);
  2352. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
  2353. if (IS_ERR(cqr))
  2354. return cqr;
  2355. /* transfer length factor: how many bytes to read from the last track */
  2356. if (first_trk == last_trk)
  2357. tlf = last_offs - first_offs + 1;
  2358. else
  2359. tlf = last_offs + 1;
  2360. tlf *= blksize;
  2361. itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
  2362. if (IS_ERR(itcw)) {
  2363. dasd_sfree_request(cqr, startdev);
  2364. return ERR_PTR(-EINVAL);
  2365. }
  2366. cqr->cpaddr = itcw_get_tcw(itcw);
  2367. if (prepare_itcw(itcw, first_trk, last_trk,
  2368. cmd, basedev, startdev,
  2369. first_offs + 1,
  2370. trkcount, blksize,
  2371. (last_rec - first_rec + 1) * blksize,
  2372. tlf, blk_per_trk) == -EAGAIN) {
  2373. /* Clock not in sync and XRC is enabled.
  2374. * Try again later.
  2375. */
  2376. dasd_sfree_request(cqr, startdev);
  2377. return ERR_PTR(-EAGAIN);
  2378. }
  2379. /*
  2380. * A tidaw can address 4k of memory, but must not cross page boundaries
  2381. * We can let the block layer handle this by setting
  2382. * blk_queue_segment_boundary to page boundaries and
  2383. * blk_max_segment_size to page size when setting up the request queue.
  2384. * For write requests, a TIDAW must not cross track boundaries, because
  2385. * we have to set the CBC flag on the last tidaw for each track.
  2386. */
  2387. if (rq_data_dir(req) == WRITE) {
  2388. new_track = 1;
  2389. recid = first_rec;
  2390. rq_for_each_segment(bv, req, iter) {
  2391. dst = page_address(bv->bv_page) + bv->bv_offset;
  2392. seg_len = bv->bv_len;
  2393. while (seg_len) {
  2394. if (new_track) {
  2395. trkid = recid;
  2396. offs = sector_div(trkid, blk_per_trk);
  2397. count_to_trk_end = blk_per_trk - offs;
  2398. count = min((last_rec - recid + 1),
  2399. (sector_t)count_to_trk_end);
  2400. len_to_track_end = count * blksize;
  2401. recid += count;
  2402. new_track = 0;
  2403. }
  2404. part_len = min(seg_len, len_to_track_end);
  2405. seg_len -= part_len;
  2406. len_to_track_end -= part_len;
  2407. /* We need to end the tidaw at track end */
  2408. if (!len_to_track_end) {
  2409. new_track = 1;
  2410. tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
  2411. } else
  2412. tidaw_flags = 0;
  2413. last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
  2414. dst, part_len);
  2415. if (IS_ERR(last_tidaw))
  2416. return ERR_PTR(-EINVAL);
  2417. dst += part_len;
  2418. }
  2419. }
  2420. } else {
  2421. rq_for_each_segment(bv, req, iter) {
  2422. dst = page_address(bv->bv_page) + bv->bv_offset;
  2423. last_tidaw = itcw_add_tidaw(itcw, 0x00,
  2424. dst, bv->bv_len);
  2425. if (IS_ERR(last_tidaw))
  2426. return ERR_PTR(-EINVAL);
  2427. }
  2428. }
  2429. last_tidaw->flags |= TIDAW_FLAGS_LAST;
  2430. last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
  2431. itcw_finalize(itcw);
  2432. if (blk_noretry_request(req) ||
  2433. block->base->features & DASD_FEATURE_FAILFAST)
  2434. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  2435. cqr->cpmode = 1;
  2436. cqr->startdev = startdev;
  2437. cqr->memdev = startdev;
  2438. cqr->block = block;
  2439. cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
  2440. cqr->lpm = startdev->path_data.ppm;
  2441. cqr->retries = 256;
  2442. cqr->buildclk = get_clock();
  2443. cqr->status = DASD_CQR_FILLED;
  2444. return cqr;
  2445. }
/*
 * Build a channel program for a block layer request, choosing the most
 * capable format the device supports:
 *  1. transport mode (TCW) track I/O, when the data fits into
 *     fcx_max_data and the request is single-track (or the device
 *     supports multi-track fcx),
 *  2. command mode track I/O (requires prefix plus the read/write
 *     track data features),
 *  3. command mode single-record I/O as the universal fallback.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	/* requests touching the first two tracks need CDL special handling */
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	/* feature bits: see the ECKD features data returned by the device */
	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	/* tpm write request add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		/* -EAGAIN and -ENOMEM are passed to the caller,
		 * everything else falls back to command mode */
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
/*
 * Undo what dasd_eckd_build_cp did for a command-mode request that used
 * the dasd_page_cache: walk the CCW chain in lock step with the request
 * segments, copy read data from the cache pages back into the bio pages
 * and return the cache pages.  Finally free the cqr.
 *
 * Returns 1 if the cqr completed with status DASD_CQR_DONE, 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	/* without the page cache, build_cp used the bio pages directly */
	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_eckd_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			/* (CDL layout interleaves one per data record) */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				/* find the data address, following an IDAL */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					/* the data went through a cache page */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
  2568. /*
  2569. * Modify ccw/tcw in cqr so it can be started on a base device.
  2570. *
  2571. * Note that this is not enough to restart the cqr!
  2572. * Either reset cqr->startdev as well (summary unit check handling)
  2573. * or restart via separate cqr (as in ERP handling).
  2574. */
  2575. void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
  2576. {
  2577. struct ccw1 *ccw;
  2578. struct PFX_eckd_data *pfxdata;
  2579. struct tcw *tcw;
  2580. struct tccb *tccb;
  2581. struct dcw *dcw;
  2582. if (cqr->cpmode == 1) {
  2583. tcw = cqr->cpaddr;
  2584. tccb = tcw_get_tccb(tcw);
  2585. dcw = (struct dcw *)&tccb->tca[0];
  2586. pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
  2587. pfxdata->validity.verify_base = 0;
  2588. pfxdata->validity.hyper_pav = 0;
  2589. } else {
  2590. ccw = cqr->cpaddr;
  2591. pfxdata = cqr->data;
  2592. if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
  2593. pfxdata->validity.verify_base = 0;
  2594. pfxdata->validity.hyper_pav = 0;
  2595. }
  2596. }
  2597. }
  2598. #define DASD_ECKD_CHANQ_MAX_SIZE 4
/*
 * Build a channel program on an alias device (PAV) if one is available,
 * falling back to the base device otherwise.  The per-device request
 * count limits the channel queue depth to DASD_ECKD_CHANQ_MAX_SIZE.
 */
static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = (struct dasd_eckd_private *) startdev->private;
	/*
	 * NOTE(review): count is read here without the ccwdev lock that
	 * guards its increment/decrement below — presumably a deliberate
	 * best-effort limit; confirm before relying on an exact bound.
	 */
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	cqr = dasd_eckd_build_cp(startdev, block, req);
	/* only successful builds count against the queue limit */
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}
  2621. static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
  2622. struct request *req)
  2623. {
  2624. struct dasd_eckd_private *private;
  2625. unsigned long flags;
  2626. spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
  2627. private = (struct dasd_eckd_private *) cqr->memdev->private;
  2628. private->count--;
  2629. spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
  2630. return dasd_eckd_free_cp(cqr, req);
  2631. }
  2632. static int
  2633. dasd_eckd_fill_info(struct dasd_device * device,
  2634. struct dasd_information2_t * info)
  2635. {
  2636. struct dasd_eckd_private *private;
  2637. private = (struct dasd_eckd_private *) device->private;
  2638. info->label_block = 2;
  2639. info->FBA_layout = private->uses_cdl ? 0 : 1;
  2640. info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
  2641. info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
  2642. memcpy(info->characteristics, &private->rdc_data,
  2643. sizeof(struct dasd_eckd_characteristics));
  2644. info->confdata_size = min((unsigned long)private->conf_len,
  2645. sizeof(info->configuration_data));
  2646. memcpy(info->configuration_data, private->conf_data,
  2647. info->confdata_size);
  2648. return 0;
  2649. }
  2650. /*
  2651. * SECTION: ioctl functions for eckd devices.
  2652. */
/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
  2658. static int
  2659. dasd_eckd_release(struct dasd_device *device)
  2660. {
  2661. struct dasd_ccw_req *cqr;
  2662. int rc;
  2663. struct ccw1 *ccw;
  2664. int useglobal;
  2665. if (!capable(CAP_SYS_ADMIN))
  2666. return -EACCES;
  2667. useglobal = 0;
  2668. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
  2669. if (IS_ERR(cqr)) {
  2670. mutex_lock(&dasd_reserve_mutex);
  2671. useglobal = 1;
  2672. cqr = &dasd_reserve_req->cqr;
  2673. memset(cqr, 0, sizeof(*cqr));
  2674. memset(&dasd_reserve_req->ccw, 0,
  2675. sizeof(dasd_reserve_req->ccw));
  2676. cqr->cpaddr = &dasd_reserve_req->ccw;
  2677. cqr->data = &dasd_reserve_req->data;
  2678. cqr->magic = DASD_ECKD_MAGIC;
  2679. }
  2680. ccw = cqr->cpaddr;
  2681. ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
  2682. ccw->flags |= CCW_FLAG_SLI;
  2683. ccw->count = 32;
  2684. ccw->cda = (__u32)(addr_t) cqr->data;
  2685. cqr->startdev = device;
  2686. cqr->memdev = device;
  2687. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  2688. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  2689. cqr->retries = 2; /* set retry counter to enable basic ERP */
  2690. cqr->expires = 2 * HZ;
  2691. cqr->buildclk = get_clock();
  2692. cqr->status = DASD_CQR_FILLED;
  2693. rc = dasd_sleep_on_immediatly(cqr);
  2694. if (useglobal)
  2695. mutex_unlock(&dasd_reserve_mutex);
  2696. else
  2697. dasd_sfree_request(cqr, cqr->memdev);
  2698. return rc;
  2699. }
/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to terminating the I/O if
 * the interrupt is outstanding for a certain time.
 */
  2706. static int
  2707. dasd_eckd_reserve(struct dasd_device *device)
  2708. {
  2709. struct dasd_ccw_req *cqr;
  2710. int rc;
  2711. struct ccw1 *ccw;
  2712. int useglobal;
  2713. if (!capable(CAP_SYS_ADMIN))
  2714. return -EACCES;
  2715. useglobal = 0;
  2716. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
  2717. if (IS_ERR(cqr)) {
  2718. mutex_lock(&dasd_reserve_mutex);
  2719. useglobal = 1;
  2720. cqr = &dasd_reserve_req->cqr;
  2721. memset(cqr, 0, sizeof(*cqr));
  2722. memset(&dasd_reserve_req->ccw, 0,
  2723. sizeof(dasd_reserve_req->ccw));
  2724. cqr->cpaddr = &dasd_reserve_req->ccw;
  2725. cqr->data = &dasd_reserve_req->data;
  2726. cqr->magic = DASD_ECKD_MAGIC;
  2727. }
  2728. ccw = cqr->cpaddr;
  2729. ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
  2730. ccw->flags |= CCW_FLAG_SLI;
  2731. ccw->count = 32;
  2732. ccw->cda = (__u32)(addr_t) cqr->data;
  2733. cqr->startdev = device;
  2734. cqr->memdev = device;
  2735. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  2736. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  2737. cqr->retries = 2; /* set retry counter to enable basic ERP */
  2738. cqr->expires = 2 * HZ;
  2739. cqr->buildclk = get_clock();
  2740. cqr->status = DASD_CQR_FILLED;
  2741. rc = dasd_sleep_on_immediatly(cqr);
  2742. if (useglobal)
  2743. mutex_unlock(&dasd_reserve_mutex);
  2744. else
  2745. dasd_sfree_request(cqr, cqr->memdev);
  2746. return rc;
  2747. }
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
  2753. static int
  2754. dasd_eckd_steal_lock(struct dasd_device *device)
  2755. {
  2756. struct dasd_ccw_req *cqr;
  2757. int rc;
  2758. struct ccw1 *ccw;
  2759. int useglobal;
  2760. if (!capable(CAP_SYS_ADMIN))
  2761. return -EACCES;
  2762. useglobal = 0;
  2763. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
  2764. if (IS_ERR(cqr)) {
  2765. mutex_lock(&dasd_reserve_mutex);
  2766. useglobal = 1;
  2767. cqr = &dasd_reserve_req->cqr;
  2768. memset(cqr, 0, sizeof(*cqr));
  2769. memset(&dasd_reserve_req->ccw, 0,
  2770. sizeof(dasd_reserve_req->ccw));
  2771. cqr->cpaddr = &dasd_reserve_req->ccw;
  2772. cqr->data = &dasd_reserve_req->data;
  2773. cqr->magic = DASD_ECKD_MAGIC;
  2774. }
  2775. ccw = cqr->cpaddr;
  2776. ccw->cmd_code = DASD_ECKD_CCW_SLCK;
  2777. ccw->flags |= CCW_FLAG_SLI;
  2778. ccw->count = 32;
  2779. ccw->cda = (__u32)(addr_t) cqr->data;
  2780. cqr->startdev = device;
  2781. cqr->memdev = device;
  2782. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  2783. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  2784. cqr->retries = 2; /* set retry counter to enable basic ERP */
  2785. cqr->expires = 2 * HZ;
  2786. cqr->buildclk = get_clock();
  2787. cqr->status = DASD_CQR_FILLED;
  2788. rc = dasd_sleep_on_immediatly(cqr);
  2789. if (useglobal)
  2790. mutex_unlock(&dasd_reserve_mutex);
  2791. else
  2792. dasd_sfree_request(cqr, cqr->memdev);
  2793. return rc;
  2794. }
  2795. /*
  2796. * SNID - Sense Path Group ID
  2797. * This ioctl may be used in situations where I/O is stalled due to
  2798. * a reserve, so if the normal dasd_smalloc_request fails, we use the
  2799. * preallocated dasd_reserve_req.
  2800. */
  2801. static int dasd_eckd_snid(struct dasd_device *device,
  2802. void __user *argp)
  2803. {
  2804. struct dasd_ccw_req *cqr;
  2805. int rc;
  2806. struct ccw1 *ccw;
  2807. int useglobal;
  2808. struct dasd_snid_ioctl_data usrparm;
  2809. if (!capable(CAP_SYS_ADMIN))
  2810. return -EACCES;
  2811. if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
  2812. return -EFAULT;
  2813. useglobal = 0;
  2814. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
  2815. sizeof(struct dasd_snid_data), device);
  2816. if (IS_ERR(cqr)) {
  2817. mutex_lock(&dasd_reserve_mutex);
  2818. useglobal = 1;
  2819. cqr = &dasd_reserve_req->cqr;
  2820. memset(cqr, 0, sizeof(*cqr));
  2821. memset(&dasd_reserve_req->ccw, 0,
  2822. sizeof(dasd_reserve_req->ccw));
  2823. cqr->cpaddr = &dasd_reserve_req->ccw;
  2824. cqr->data = &dasd_reserve_req->data;
  2825. cqr->magic = DASD_ECKD_MAGIC;
  2826. }
  2827. ccw = cqr->cpaddr;
  2828. ccw->cmd_code = DASD_ECKD_CCW_SNID;
  2829. ccw->flags |= CCW_FLAG_SLI;
  2830. ccw->count = 12;
  2831. ccw->cda = (__u32)(addr_t) cqr->data;
  2832. cqr->startdev = device;
  2833. cqr->memdev = device;
  2834. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  2835. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  2836. cqr->retries = 5;
  2837. cqr->expires = 10 * HZ;
  2838. cqr->buildclk = get_clock();
  2839. cqr->status = DASD_CQR_FILLED;
  2840. cqr->lpm = usrparm.path_mask;
  2841. rc = dasd_sleep_on_immediatly(cqr);
  2842. /* verify that I/O processing didn't modify the path mask */
  2843. if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
  2844. rc = -EIO;
  2845. if (!rc) {
  2846. usrparm.data = *((struct dasd_snid_data *)cqr->data);
  2847. if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
  2848. rc = -EFAULT;
  2849. }
  2850. if (useglobal)
  2851. mutex_unlock(&dasd_reserve_mutex);
  2852. else
  2853. dasd_sfree_request(cqr, cqr->memdev);
  2854. return rc;
  2855. }
  2856. /*
  2857. * Read performance statistics
  2858. */
  2859. static int
  2860. dasd_eckd_performance(struct dasd_device *device, void __user *argp)
  2861. {
  2862. struct dasd_psf_prssd_data *prssdp;
  2863. struct dasd_rssd_perf_stats_t *stats;
  2864. struct dasd_ccw_req *cqr;
  2865. struct ccw1 *ccw;
  2866. int rc;
  2867. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
  2868. (sizeof(struct dasd_psf_prssd_data) +
  2869. sizeof(struct dasd_rssd_perf_stats_t)),
  2870. device);
  2871. if (IS_ERR(cqr)) {
  2872. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  2873. "Could not allocate initialization request");
  2874. return PTR_ERR(cqr);
  2875. }
  2876. cqr->startdev = device;
  2877. cqr->memdev = device;
  2878. cqr->retries = 0;
  2879. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  2880. cqr->expires = 10 * HZ;
  2881. /* Prepare for Read Subsystem Data */
  2882. prssdp = (struct dasd_psf_prssd_data *) cqr->data;
  2883. memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
  2884. prssdp->order = PSF_ORDER_PRSSD;
  2885. prssdp->suborder = 0x01; /* Performance Statistics */
  2886. prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
  2887. ccw = cqr->cpaddr;
  2888. ccw->cmd_code = DASD_ECKD_CCW_PSF;
  2889. ccw->count = sizeof(struct dasd_psf_prssd_data);
  2890. ccw->flags |= CCW_FLAG_CC;
  2891. ccw->cda = (__u32)(addr_t) prssdp;
  2892. /* Read Subsystem Data - Performance Statistics */
  2893. stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
  2894. memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
  2895. ccw++;
  2896. ccw->cmd_code = DASD_ECKD_CCW_RSSD;
  2897. ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
  2898. ccw->cda = (__u32)(addr_t) stats;
  2899. cqr->buildclk = get_clock();
  2900. cqr->status = DASD_CQR_FILLED;
  2901. rc = dasd_sleep_on(cqr);
  2902. if (rc == 0) {
  2903. prssdp = (struct dasd_psf_prssd_data *) cqr->data;
  2904. stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
  2905. if (copy_to_user(argp, stats,
  2906. sizeof(struct dasd_rssd_perf_stats_t)))
  2907. rc = -EFAULT;
  2908. }
  2909. dasd_sfree_request(cqr, cqr->memdev);
  2910. return rc;
  2911. }
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
  2916. static int
  2917. dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
  2918. {
  2919. struct dasd_eckd_private *private =
  2920. (struct dasd_eckd_private *)device->private;
  2921. struct attrib_data_t attrib = private->attrib;
  2922. int rc;
  2923. if (!capable(CAP_SYS_ADMIN))
  2924. return -EACCES;
  2925. if (!argp)
  2926. return -EINVAL;
  2927. rc = 0;
  2928. if (copy_to_user(argp, (long *) &attrib,
  2929. sizeof(struct attrib_data_t)))
  2930. rc = -EFAULT;
  2931. return rc;
  2932. }
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
  2937. static int
  2938. dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
  2939. {
  2940. struct dasd_eckd_private *private =
  2941. (struct dasd_eckd_private *)device->private;
  2942. struct attrib_data_t attrib;
  2943. if (!capable(CAP_SYS_ADMIN))
  2944. return -EACCES;
  2945. if (!argp)
  2946. return -EINVAL;
  2947. if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
  2948. return -EFAULT;
  2949. private->attrib = attrib;
  2950. dev_info(&device->cdev->dev,
  2951. "The DASD cache mode was set to %x (%i cylinder prestage)\n",
  2952. private->attrib.operation, private->attrib.nr_cyl);
  2953. return 0;
  2954. }
  2955. /*
  2956. * Issue syscall I/O to EMC Symmetrix array.
  2957. * CCWs are PSF and RSSD
  2958. */
  2959. static int dasd_symm_io(struct dasd_device *device, void __user *argp)
  2960. {
  2961. struct dasd_symmio_parms usrparm;
  2962. char *psf_data, *rssd_result;
  2963. struct dasd_ccw_req *cqr;
  2964. struct ccw1 *ccw;
  2965. char psf0, psf1;
  2966. int rc;
  2967. if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
  2968. return -EACCES;
  2969. psf0 = psf1 = 0;
  2970. /* Copy parms from caller */
  2971. rc = -EFAULT;
  2972. if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
  2973. goto out;
  2974. if (is_compat_task() || sizeof(long) == 4) {
  2975. /* Make sure pointers are sane even on 31 bit. */
  2976. rc = -EINVAL;
  2977. if ((usrparm.psf_data >> 32) != 0)
  2978. goto out;
  2979. if ((usrparm.rssd_result >> 32) != 0)
  2980. goto out;
  2981. usrparm.psf_data &= 0x7fffffffULL;
  2982. usrparm.rssd_result &= 0x7fffffffULL;
  2983. }
  2984. /* alloc I/O data area */
  2985. psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
  2986. rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
  2987. if (!psf_data || !rssd_result) {
  2988. rc = -ENOMEM;
  2989. goto out_free;
  2990. }
  2991. /* get syscall header from user space */
  2992. rc = -EFAULT;
  2993. if (copy_from_user(psf_data,
  2994. (void __user *)(unsigned long) usrparm.psf_data,
  2995. usrparm.psf_data_len))
  2996. goto out_free;
  2997. psf0 = psf_data[0];
  2998. psf1 = psf_data[1];
  2999. /* setup CCWs for PSF + RSSD */
  3000. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
  3001. if (IS_ERR(cqr)) {
  3002. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  3003. "Could not allocate initialization request");
  3004. rc = PTR_ERR(cqr);
  3005. goto out_free;
  3006. }
  3007. cqr->startdev = device;
  3008. cqr->memdev = device;
  3009. cqr->retries = 3;
  3010. cqr->expires = 10 * HZ;
  3011. cqr->buildclk = get_clock();
  3012. cqr->status = DASD_CQR_FILLED;
  3013. /* Build the ccws */
  3014. ccw = cqr->cpaddr;
  3015. /* PSF ccw */
  3016. ccw->cmd_code = DASD_ECKD_CCW_PSF;
  3017. ccw->count = usrparm.psf_data_len;
  3018. ccw->flags |= CCW_FLAG_CC;
  3019. ccw->cda = (__u32)(addr_t) psf_data;
  3020. ccw++;
  3021. /* RSSD ccw */
  3022. ccw->cmd_code = DASD_ECKD_CCW_RSSD;
  3023. ccw->count = usrparm.rssd_result_len;
  3024. ccw->flags = CCW_FLAG_SLI ;
  3025. ccw->cda = (__u32)(addr_t) rssd_result;
  3026. rc = dasd_sleep_on(cqr);
  3027. if (rc)
  3028. goto out_sfree;
  3029. rc = -EFAULT;
  3030. if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
  3031. rssd_result, usrparm.rssd_result_len))
  3032. goto out_sfree;
  3033. rc = 0;
  3034. out_sfree:
  3035. dasd_sfree_request(cqr, cqr->memdev);
  3036. out_free:
  3037. kfree(rssd_result);
  3038. kfree(psf_data);
  3039. out:
  3040. DBF_DEV_EVENT(DBF_WARNING, device,
  3041. "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
  3042. (int) psf0, (int) psf1, rc);
  3043. return rc;
  3044. }
  3045. static int
  3046. dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
  3047. {
  3048. struct dasd_device *device = block->base;
  3049. switch (cmd) {
  3050. case BIODASDGATTR:
  3051. return dasd_eckd_get_attrib(device, argp);
  3052. case BIODASDSATTR:
  3053. return dasd_eckd_set_attrib(device, argp);
  3054. case BIODASDPSRD:
  3055. return dasd_eckd_performance(device, argp);
  3056. case BIODASDRLSE:
  3057. return dasd_eckd_release(device);
  3058. case BIODASDRSRV:
  3059. return dasd_eckd_reserve(device);
  3060. case BIODASDSLCK:
  3061. return dasd_eckd_steal_lock(device);
  3062. case BIODASDSNID:
  3063. return dasd_eckd_snid(device, argp);
  3064. case BIODASDSYMMIO:
  3065. return dasd_symm_io(device, argp);
  3066. default:
  3067. return -ENOIOCTLCMD;
  3068. }
  3069. }
  3070. /*
  3071. * Dump the range of CCWs into 'page' buffer
  3072. * and return number of printed chars.
  3073. */
/*
 * Dump the CCW range [from, to] into the 'page' buffer and return the
 * number of characters written.  For each CCW the raw 8 bytes are
 * printed, followed by up to 32 bytes of the data area it addresses
 * (IDALs are followed one level to reach the real data).
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);

		/* dump data (max 32 bytes) */
		for (count = 0; count < from->count && count < 32; count++) {
			/* blank every 4 bytes, an extra blank every 8 */
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	return len;
}
  3100. static void
  3101. dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
  3102. char *reason)
  3103. {
  3104. u64 *sense;
  3105. u64 *stat;
  3106. sense = (u64 *) dasd_get_sense(irb);
  3107. stat = (u64 *) &irb->scsw;
  3108. if (sense) {
  3109. DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
  3110. "%016llx %016llx %016llx %016llx",
  3111. reason, *stat, *((u32 *) (stat + 1)),
  3112. sense[0], sense[1], sense[2], sense[3]);
  3113. } else {
  3114. DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
  3115. reason, *stat, *((u32 *) (stat + 1)),
  3116. "NO VALID SENSE");
  3117. }
  3118. }
  3119. /*
  3120. * Print sense data and related channel program.
  3121. * Parts are printed because printk buffer is only 1024 bytes.
  3122. */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	/* Stage all output in a zeroed page; skip the dump entirely on OOM. */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		/* 32 bytes of sense data, printed 8 bytes per line */
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));
			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}
		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, KERN_ERR PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk("%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		/* the channel program ends at the first CCW without
		 * command/data chaining */
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, KERN_ERR PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk("%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
			irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2; /* there is a gap - print header */
			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		/* NOTE(review): max() keeps 'from' monotonic so no CCW is
		 * printed twice; dump_ccw_range prints nothing if from > to. */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1; /* there is a gap - print header */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk("%s", page);
	}
	free_page((unsigned long) page);
}
  3215. /*
  3216. * Print sense data from a tcw.
  3217. */
  3218. static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
  3219. struct dasd_ccw_req *req, struct irb *irb)
  3220. {
  3221. char *page;
  3222. int len, sl, sct, residual;
  3223. struct tsb *tsb;
  3224. u8 *sense, *rcq;
  3225. page = (char *) get_zeroed_page(GFP_ATOMIC);
  3226. if (page == NULL) {
  3227. DBF_DEV_EVENT(DBF_WARNING, device, " %s",
  3228. "No memory to dump sense data");
  3229. return;
  3230. }
  3231. /* dump the sense data */
  3232. len = sprintf(page, KERN_ERR PRINTK_HEADER
  3233. " I/O status report for device %s:\n",
  3234. dev_name(&device->cdev->dev));
  3235. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3236. " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
  3237. "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
  3238. req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
  3239. scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
  3240. scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
  3241. irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
  3242. req ? req->intrc : 0);
  3243. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3244. " device %s: Failing TCW: %p\n",
  3245. dev_name(&device->cdev->dev),
  3246. (void *) (addr_t) irb->scsw.tm.tcw);
  3247. tsb = NULL;
  3248. sense = NULL;
  3249. if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
  3250. tsb = tcw_get_tsb(
  3251. (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
  3252. if (tsb) {
  3253. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3254. " tsb->length %d\n", tsb->length);
  3255. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3256. " tsb->flags %x\n", tsb->flags);
  3257. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3258. " tsb->dcw_offset %d\n", tsb->dcw_offset);
  3259. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3260. " tsb->count %d\n", tsb->count);
  3261. residual = tsb->count - 28;
  3262. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3263. " residual %d\n", residual);
  3264. switch (tsb->flags & 0x07) {
  3265. case 1: /* tsa_iostat */
  3266. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3267. " tsb->tsa.iostat.dev_time %d\n",
  3268. tsb->tsa.iostat.dev_time);
  3269. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3270. " tsb->tsa.iostat.def_time %d\n",
  3271. tsb->tsa.iostat.def_time);
  3272. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3273. " tsb->tsa.iostat.queue_time %d\n",
  3274. tsb->tsa.iostat.queue_time);
  3275. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3276. " tsb->tsa.iostat.dev_busy_time %d\n",
  3277. tsb->tsa.iostat.dev_busy_time);
  3278. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3279. " tsb->tsa.iostat.dev_act_time %d\n",
  3280. tsb->tsa.iostat.dev_act_time);
  3281. sense = tsb->tsa.iostat.sense;
  3282. break;
  3283. case 2: /* ts_ddpc */
  3284. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3285. " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
  3286. for (sl = 0; sl < 2; sl++) {
  3287. len += sprintf(page + len,
  3288. KERN_ERR PRINTK_HEADER
  3289. " tsb->tsa.ddpc.rcq %2d-%2d: ",
  3290. (8 * sl), ((8 * sl) + 7));
  3291. rcq = tsb->tsa.ddpc.rcq;
  3292. for (sct = 0; sct < 8; sct++) {
  3293. len += sprintf(page + len, " %02x",
  3294. rcq[8 * sl + sct]);
  3295. }
  3296. len += sprintf(page + len, "\n");
  3297. }
  3298. sense = tsb->tsa.ddpc.sense;
  3299. break;
  3300. case 3: /* tsa_intrg */
  3301. len += sprintf(page + len, KERN_ERR PRINTK_HEADER
  3302. " tsb->tsa.intrg.: not supportet yet \n");
  3303. break;
  3304. }
  3305. if (sense) {
  3306. for (sl = 0; sl < 4; sl++) {
  3307. len += sprintf(page + len,
  3308. KERN_ERR PRINTK_HEADER
  3309. " Sense(hex) %2d-%2d:",
  3310. (8 * sl), ((8 * sl) + 7));
  3311. for (sct = 0; sct < 8; sct++) {
  3312. len += sprintf(page + len, " %02x",
  3313. sense[8 * sl + sct]);
  3314. }
  3315. len += sprintf(page + len, "\n");
  3316. }
  3317. if (sense[27] & DASD_SENSE_BIT_0) {
  3318. /* 24 Byte Sense Data */
  3319. sprintf(page + len, KERN_ERR PRINTK_HEADER
  3320. " 24 Byte: %x MSG %x, "
  3321. "%s MSGb to SYSOP\n",
  3322. sense[7] >> 4, sense[7] & 0x0f,
  3323. sense[1] & 0x10 ? "" : "no");
  3324. } else {
  3325. /* 32 Byte Sense Data */
  3326. sprintf(page + len, KERN_ERR PRINTK_HEADER
  3327. " 32 Byte: Format: %x "
  3328. "Exception class %x\n",
  3329. sense[6] & 0x0f, sense[22] >> 4);
  3330. }
  3331. } else {
  3332. sprintf(page + len, KERN_ERR PRINTK_HEADER
  3333. " SORRY - NO VALID SENSE AVAILABLE\n");
  3334. }
  3335. } else {
  3336. sprintf(page + len, KERN_ERR PRINTK_HEADER
  3337. " SORRY - NO TSB DATA AVAILABLE\n");
  3338. }
  3339. printk("%s", page);
  3340. free_page((unsigned long) page);
  3341. }
  3342. static void dasd_eckd_dump_sense(struct dasd_device *device,
  3343. struct dasd_ccw_req *req, struct irb *irb)
  3344. {
  3345. if (scsw_is_tm(&irb->scsw))
  3346. dasd_eckd_dump_sense_tcw(device, req, irb);
  3347. else
  3348. dasd_eckd_dump_sense_ccw(device, req, irb);
  3349. }
/*
 * PM freeze callback of the ECKD discipline: detach the device from its
 * LCU before suspend. Always returns 0.
 */
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
	/*
	 * the device should be disconnected from our LCU structure
	 * on restore we will reconnect it and reread LCU specific
	 * information like PAV support that might have changed
	 */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);

	return 0;
}
/*
 * PM restore callback of the ECKD discipline: re-read configuration
 * data, regenerate the UID (warning if it changed across the suspend),
 * reconnect the device to its LCU and re-read features and device
 * characteristics.
 *
 * Returns 0 on success, a negative value from the LCU registration, or
 * -1 on any other failure.
 */
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int is_known, rc;
	struct dasd_uid temp_uid;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	/* snapshot the pre-suspend UID so it can be compared below */
	dasd_eckd_get_uid(device, &temp_uid);
	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	/* the ccwdev lock guards concurrent access to private->uid */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (rc)
		goto out_err;

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	/* NOTE(review): this path returns the negative value directly while
	 * every other failure returns -1 via out_err - confirm callers treat
	 * both the same way */
	if (is_known < 0)
		return is_known;
	if (!is_known) {
		/* first device on this lcu: validate server, finish setup */
		dasd_eckd_validate_server(device);
		dasd_alias_lcu_setup_complete(device);
	} else
		dasd_alias_wait_for_lcu_setup(device);

	/* RE-Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err;
	}
	/* publish the new characteristics under the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err:
	return -1;
}
  3415. static int dasd_eckd_reload_device(struct dasd_device *device)
  3416. {
  3417. struct dasd_eckd_private *private;
  3418. int rc, old_base;
  3419. char print_uid[60];
  3420. struct dasd_uid uid;
  3421. unsigned long flags;
  3422. private = (struct dasd_eckd_private *) device->private;
  3423. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  3424. old_base = private->uid.base_unit_addr;
  3425. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  3426. /* Read Configuration Data */
  3427. rc = dasd_eckd_read_conf(device);
  3428. if (rc)
  3429. goto out_err;
  3430. rc = dasd_eckd_generate_uid(device);
  3431. if (rc)
  3432. goto out_err;
  3433. /*
  3434. * update unit address configuration and
  3435. * add device to alias management
  3436. */
  3437. dasd_alias_update_add_device(device);
  3438. dasd_eckd_get_uid(device, &uid);
  3439. if (old_base != uid.base_unit_addr) {
  3440. if (strlen(uid.vduit) > 0)
  3441. snprintf(print_uid, sizeof(print_uid),
  3442. "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
  3443. uid.ssid, uid.base_unit_addr, uid.vduit);
  3444. else
  3445. snprintf(print_uid, sizeof(print_uid),
  3446. "%s.%s.%04x.%02x", uid.vendor, uid.serial,
  3447. uid.ssid, uid.base_unit_addr);
  3448. dev_info(&device->cdev->dev,
  3449. "An Alias device was reassigned to a new base device "
  3450. "with UID: %s\n", print_uid);
  3451. }
  3452. return 0;
  3453. out_err:
  3454. return -1;
  3455. }
/*
 * CCW driver glue for ECKD devices. probe and set_online are ECKD
 * specific; all other callbacks delegate to the dasd_generic_* helpers.
 */
static struct ccw_driver dasd_eckd_driver = {
	.name = "dasd-eckd",
	.owner = THIS_MODULE,
	.ids = dasd_eckd_ids,
	.probe = dasd_eckd_probe,
	.remove = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online = dasd_eckd_set_online,
	.notify = dasd_generic_notify,
	.path_event = dasd_generic_path_event,
	.freeze = dasd_generic_pm_freeze,
	.thaw = dasd_generic_restore_device,
	.restore = dasd_generic_restore_device,
	.uc_handler = dasd_generic_uc_handler,
};
  3471. /*
  3472. * max_blocks is dependent on the amount of storage that is available
  3473. * in the static io buffer for each device. Currently each device has
  3474. * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
  3475. * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
  3476. * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
  3477. * addition we have one define extent ccw + 16 bytes of data and one
  3478. * locate record ccw + 16 bytes of data. That makes:
  3479. * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Note that the max_blocks value actually used below is
 * the more conservative 190, which leaves additional headroom beyond
 * this calculation.
 */
/*
 * Discipline operations for ECKD devices. Unlike dasd_eckd_driver above
 * (which uses the dasd_generic_* PM callbacks), the discipline wires
 * freeze/restore/reload to the ECKD-specific implementations.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",	/* converted in place by ASCEBC() in dasd_eckd_init() */
	.max_blocks = 190,	/* see sizing comment above */
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.verify_path = dasd_eckd_verify_path,
	.ready_to_online = dasd_eckd_ready_to_online,
	.online_to_ready = dasd_eckd_online_to_ready,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
};
  3514. static int __init
  3515. dasd_eckd_init(void)
  3516. {
  3517. int ret;
  3518. ASCEBC(dasd_eckd_discipline.ebcname, 4);
  3519. dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
  3520. GFP_KERNEL | GFP_DMA);
  3521. if (!dasd_reserve_req)
  3522. return -ENOMEM;
  3523. path_verification_worker = kmalloc(sizeof(*path_verification_worker),
  3524. GFP_KERNEL | GFP_DMA);
  3525. if (!path_verification_worker) {
  3526. kfree(dasd_reserve_req);
  3527. return -ENOMEM;
  3528. }
  3529. ret = ccw_driver_register(&dasd_eckd_driver);
  3530. if (!ret)
  3531. wait_for_device_probe();
  3532. else {
  3533. kfree(path_verification_worker);
  3534. kfree(dasd_reserve_req);
  3535. }
  3536. return ret;
  3537. }
/*
 * Module exit: unregister the ccw driver first so no new requests can
 * arrive, then free the preallocated request buffers.
 */
static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(path_verification_worker);
	kfree(dasd_reserve_req);
}
/* Module entry and exit points. */
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);