ehea_main.c

/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
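/*
 * Debug helper: hex-dump 'len' bytes at 'adr', 16 bytes per line,
 * prefixed with the driver name and 'msg'. Used throughout this file
 * when the corresponding netif_msg_* level is enabled.
 */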
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
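/*
 * Rebuild the flat snapshot of all firmware handles (QPs, CQs, EQs and
 * memory regions) owned by the registered adapters and their active
 * ports. The new array replaces ehea_fw_handles.arr under
 * ehea_fw_handles.lock; on allocation failure the old array is kept.
 */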
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out; /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
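/*
 * Rebuild the snapshot of broadcast/multicast (BCMC) registrations:
 * two broadcast entries per active port plus two entries per multicast
 * address (untagged and VLANID_ALL each). Runs under the
 * ehea_bcmc_regs.lock spinlock, hence the GFP_ATOMIC allocation.
 */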
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list,list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out; /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
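/*
 * .ndo_get_stats callback: byte and error counters are queried from
 * the hypervisor via H_PORT_CB2, while packet counters are summed over
 * the per-queue software counters.
 */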
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	free_page((unsigned long)cb2);
out:
	return stats;
}
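/*
 * Replenish receive queue 1. RQ1 skbs are reused, so only consumed
 * array slots (NULL entries) get a fresh skb, and the doorbell is rung
 * once for all WQEs added. While __EHEA_STOP_XFER is set the refill is
 * deferred by accumulating the count in os_skbs.
 */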
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
}
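/*
 * Common refill path for RQ2/RQ3: allocate an skb per WQE, map its
 * data area for the HEA, build the receive WQE and ring the doorbell
 * once at the end. Returns -ENOMEM only when the queue has effectively
 * run dry.
 */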
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				ehea_info("%s: rq%i ran dry - no mem for skb",
					  pr->port->netdev->name, rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
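/*
 * Extract the RQ number from a receive CQE and check its status field.
 * Returns 0 for a usable completion, -EINVAL on a receive error. A TCP
 * checksum error is tolerated when the CQE carries no parsed header
 * (header_length == 0).
 */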
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}
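/*
 * Look up (and clear) the skb slot referenced by the CQE's
 * work-request index. The following array slot and its data area are
 * prefetched to warm the cache for the next completion.
 */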
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
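/*
 * Account a receive error CQE, drop the affected RQ2/RQ3 skb, and
 * return 1 if the error was fatal and a port reset has been scheduled.
 */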
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
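/*
 * get_skb_header() callback for the inet_lro manager: locate the IPv4
 * and TCP headers of a candidate skb. Returning -1 excludes the packet
 * from aggregation.
 */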
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			      pr->port->vgrp);

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}
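/*
 * NAPI receive processing: drain up to 'budget' completions from the
 * receive CQ. RQ1 frames arrive as low-latency immediate data and are
 * copied out of the CQE itself; RQ2/RQ3 frames are taken from the
 * preposted skb arrays. Consumed WQEs are refilled at the end.
 */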
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
}
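/*
 * Verify that each send queue is still in sync with its completion
 * queue: post a purge WQE tagged SWQE_RESTART_CHECK and wait (up to
 * 100 * 5ms) for its completion to set sq_restart_flag; otherwise a
 * port reset is scheduled.
 */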
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i, k;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		k = 0;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		while (pr->sq_restart_flag == 0) {
			msleep(5);
			if (++k == 100) {
				ehea_error("HW/SW queues out of sync");
				ehea_schedule_port_reset(pr->port);
				return;
			}
		}
	}

	return;
}
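/*
 * Drain up to 'my_quota' send completions: free transmitted skbs,
 * replenish the available-WQE counter and wake the netif queue once
 * enough send WQEs have been reclaimed.
 */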
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Bad send completion status=0x%04X",
				   cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				ehea_error("Resetting port");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
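/*
 * NAPI poll handler. When the budget is not exhausted, completion
 * interrupts are re-armed and the CQs are re-checked before completing
 * NAPI, closing the race with late-arriving completions. After
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive polls an interrupt round
 * is forced via force_irq.
 */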
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
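/*
 * QP affiliated-error interrupt: query the error data for each
 * signalled QP and schedule a port reset when the resource has to be
 * reinitialized.
 */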
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1; /* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		ehea_error("Resetting port");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
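/*
 * Query H_PORT_CB0 to (re)read the port's MAC address, link speed,
 * duplex mode and default-QP configuration into the ehea_port
 * structure.
 */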
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
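/*
 * Decode one event-queue entry from the notification EQ and react to
 * port state changes, adapter malfunctions and port malfunctions.
 */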
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
		break;
	}
}
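/*
 * Tasklet behind the notification-EQ interrupt: drain all pending
 * EQEs, dispatch them to ehea_parse_eqe() and re-arm the event mask.
 */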
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
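/*
 * Register the QP affiliated-error interrupt and one receive interrupt
 * per port resource; on failure all previously requested IRQs are
 * freed again.
 */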
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
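/*
 * Set up one port resource: its event queue, send/receive completion
 * queues, the QP with its three receive queues, the skb bookkeeping
 * arrays, shared memory regions, NAPI context and LRO manager.
 */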
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb_headlen(skb);
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb_headlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much of skb->data as possible to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}

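/*
 * Register or deregister (hcallid H_REG_BCMC / H_DEREG_BCMC) the port's
 * MAC address for broadcast frames, once for untagged traffic and once
 * for all VLAN IDs.
 */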
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (untagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}

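/*
 * ndo_set_multicast_list handler: promiscuous mode takes precedence,
 * then ALLMULTI; otherwise the old multicast list is dropped and each
 * address is registered individually, falling back to ALLMULTI when
 * dropping fails or the adapter's registration limit would be exceeded.
 */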
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%llx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}

	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb_headlen(skb));
		imm_data += skb_headlen(skb);

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

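/*
 * Pick a TX queue for an skb: TCP/IPv4 flows are spread across the
 * available QPs with a cheap hash over the TCP ports and the
 * destination address; everything else goes to queue 0.
 */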
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}

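/*
 * Hard-start-xmit handler. Small packets (up to SWQE3_MAX_IMM bytes)
 * are copied entirely into the immediate data of a type-3 SWQE and the
 * skb is freed right away; larger packets use a type-2 SWQE with
 * scatter/gather descriptors and the skb is parked in sq_skba until
 * its send completion arrives.
 */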
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);
	pr->tx_packets++;

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}

static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
}

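/*
 * Walk the QP through its activation states (INITIALIZED, then ENABLED,
 * then RDY2SND), re-reading the control block with query_ehea_qp before
 * each modify_ehea_qp call.
 */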
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("ehea_port_res_setup failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("ehea_fill_port_res failed");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	mutex_unlock(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		ehea_info("Failed freeing resources for %s. ret=%i",
			  dev->name, ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

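/*
 * Mark every send WQE in the queue with the PURGE flag so the hardware
 * discards the work requests instead of transmitting them; used when
 * the send queues are quiesced before a memory re-registration.
 */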
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

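/*
 * Wait for outstanding send WQEs to complete before the queues are
 * torn down, polling swqe_avail in 5 ms steps for up to 20 tries
 * (~100 ms) per queue; warn if a queue does not drain in time.
 */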
static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int k = 0;
		while (atomic_read(&pr->swqe_avail) < swqe_max) {
			msleep(5);
			if (++k == 20) {
				ehea_error("WARNING: sq not flushed completely");
				break;
			}
		}
	}
}

int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			ehea_error("unreg shared memory region failed");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

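/*
 * After the kernel memory region has been re-registered, the receive
 * WQEs still reference the old lkey and virtual addresses; patch every
 * RQ2/RQ3 entry to use the new recv_mr lkey and remapped skb buffers.
 */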
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

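/*
 * Bring the queue pairs back up after a memory change: rebuild the
 * shared memory regions, fix up the receive WQEs, flip each QP back to
 * ENABLED via query/modify, and refill all receive queues.
 */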
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			ehea_error("creation of shared memory regions failed");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	port_napi_enable(port);

	netif_wake_queue(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

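/*
 * DLPAR memory change handler: stop traffic and disable the QPs on all
 * active ports, drop each adapter's kernel memory region, then register
 * fresh MRs against the new memory layout and restart every port.
 */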
static void ehea_rereg_mrs(struct work_struct *work)
{
	int ret, i;
	struct ehea_adapter *adapter;

	ehea_info("LPAR memory changed - re-initializing driver");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_stop_queue(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				ehea_error("unregister MR failed - driver"
					   " inoperable!");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				ehea_error("register MR failed - driver"
					   " inoperable!");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						port_napi_enable(port);
						ret = ehea_restart_qps(dev);
						check_sqs(port);
						if (!ret)
							netif_wake_queue(dev);
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	ehea_info("re-initializing driver complete");
out:
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(port->adapter->
							       handle,
							       port->
							       logical_port_id,
							       H_PORT_CB4,
							       H_PORT_CB4_JUMBO,
							       cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		free_page((unsigned long)cb4);
	}
out:
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats		= ehea_get_stats,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_register	= ehea_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();
	return ret;
}

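/*
 * ibmebus remove callback: tears down in the reverse order of probe -
 * ports first, then sysfs, the NEQ IRQ and tasklet, the EQ, the MR and
 * finally the adapter itself.
 */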
static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	flush_scheduled_work();

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}

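/*
 * Crash shutdown hook: force-frees all recorded firmware resource
 * handles and deregisters all broadcast/multicast registrations so the
 * hardware is left in a clean state for the crash kernel.
 */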
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

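/*
 * Memory hotplug (DLPAR) notifier: keeps the section bitmap in sync as
 * memory goes on- or offline. Transfers are stopped via __EHEA_STOP_XFER
 * while the memory regions are reregistered.
 */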
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		ehea_info("memory offlining canceled");
		/* Readd canceled memory block - fall through */
	case MEM_ONLINE:
		ehea_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	case MEM_GOING_OFFLINE:
		ehea_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

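/*
 * Reboot notifier: on SYS_RESTART, unregister the driver so all eHEA
 * firmware resources are freed before the machine restarts.
 */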
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		ehea_info("Reboot: freeing all eHEA resources");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

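/*
 * Validate the queue-size module parameters against the supported
 * minimum/maximum; all four are checked so every bad value is reported.
 */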
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}

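/* Read-only driver attribute reporting EHEA_CAPABILITIES to userspace. */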
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

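/*
 * Module init: validate parameters, build the busmap, register the
 * reboot/memory/crash notifiers (failures there are only logged), then
 * register the ebus driver and its capabilities attribute.
 */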
int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);

	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		ehea_info("failed registering reboot notifier");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		ehea_info("failed registering memory remove notifier");

	ret = crash_shutdown_register(&ehea_crash_handler);
	if (ret)
		ehea_info("failed registering crash handler");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		ehea_error("failed registering eHEA device driver on ebus");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		ehea_error("failed to register capabilities attribute, ret=%d",
			   ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(&ehea_crash_handler);
out:
	return ret;
}

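/* Module exit: undo everything ehea_module_init() registered. */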
static void __exit ehea_module_exit(void)
{
	int ret;

	flush_scheduled_work();

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(&ehea_crash_handler);
	if (ret)
		ehea_info("failed unregistering crash handler");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);