/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int __devinit ehea_probe_adapter(struct platform_device *dev,
                                        const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
        .driver = {
                .name = "ehea",
                .owner = THIS_MODULE,
                .of_match_table = ehea_device_table,
        },
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;

        for (x = 0; x < len; x += 16) {
                pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
                        msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}
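
/*
 * Rebuild the flat snapshot of all firmware handles (QPs, CQs, EQs and
 * memory regions) owned by the registered adapters and their active ports.
 * The snapshot lives in ehea_fw_handles under its own mutex, so other code
 * can walk the handles without traversing the adapter/port tree.
 */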
static void ehea_update_firmware_handles(void)
{
        struct ehea_fw_handle_entry *arr = NULL;
        struct ehea_adapter *adapter;
        int num_adapters = 0;
        int num_ports = 0;
        int num_portres = 0;
        int i = 0;
        int num_fw_handles, k, l;

        /* Determine number of handles */
        mutex_lock(&ehea_fw_handles.lock);

        list_for_each_entry(adapter, &adapter_list, list) {
                num_adapters++;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_ports++;
                        num_portres += port->num_def_qps;
                }
        }

        num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

        if (num_fw_handles) {
                arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                if (num_adapters == 0)
                        break;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP) ||
                            (num_ports == 0))
                                continue;

                        for (l = 0; l < port->num_def_qps; l++) {
                                struct ehea_port_res *pr = &port->port_res[l];

                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->qp->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->eq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_mr.handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_mr.handle;
                        }
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = port->qp_eq->fw_handle;
                        num_ports--;
                }

                arr[i].adh = adapter->handle;
                arr[i++].fwh = adapter->neq->fw_handle;

                if (adapter->mr.handle) {
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = adapter->mr.handle;
                }
                num_adapters--;
        }

out_update:
        kfree(ehea_fw_handles.arr);
        ehea_fw_handles.arr = arr;
        ehea_fw_handles.num_entries = i;
out:
        mutex_unlock(&ehea_fw_handles.lock);
}
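
/*
 * Rebuild the flat array of broadcast/multicast (BCMC) registrations for
 * all active ports.  Each port contributes two broadcast entries plus two
 * entries per multicast address; the snapshot is kept in ehea_bcmc_regs
 * under a spinlock and allocated with GFP_ATOMIC, since this may run in
 * atomic context.
 */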
static void ehea_update_bcmc_registrations(void)
{
        unsigned long flags;
        struct ehea_bcmc_reg_entry *arr = NULL;
        struct ehea_adapter *adapter;
        struct ehea_mc_list *mc_entry;
        int num_registrations = 0;
        int i = 0;
        int k;

        spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

        /* Determine number of registrations */
        list_for_each_entry(adapter, &adapter_list, list)
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_registrations += 2; /* Broadcast registrations */

                        list_for_each_entry(mc_entry, &port->mc_list->list, list)
                                num_registrations += 2;
                }

        if (num_registrations) {
                arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        if (num_registrations == 0)
                                goto out_update;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_UNTAGGED;
                        arr[i++].macaddr = port->mac_addr;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_VLANID_ALL;
                        arr[i++].macaddr = port->mac_addr;
                        num_registrations -= 2;

                        list_for_each_entry(mc_entry,
                                            &port->mc_list->list, list) {
                                if (num_registrations == 0)
                                        goto out_update;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
                                arr[i++].macaddr = mc_entry->macaddr;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                                  EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
                }
        }

out_update:
        kfree(ehea_bcmc_regs.arr);
        ehea_bcmc_regs.arr = arr;
        ehea_bcmc_regs.num_entries = i;
out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
                                        struct rtnl_link_stats64 *stats)
{
        struct ehea_port *port = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
                rx_bytes += port->port_res[i].rx_bytes;
        }

        for (i = 0; i < port->num_def_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes += port->port_res[i].tx_bytes;
        }

        stats->tx_packets = tx_packets;
        stats->rx_bytes = rx_bytes;
        stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;

        return &port->stats;
}
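
/*
 * Deferred work: query the H_PORT_CB2 control block from the hypervisor to
 * refresh the multicast and rx_errors counters, then reschedule itself to
 * run again in one second.
 */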
static void ehea_update_stats(struct work_struct *work)
{
        struct ehea_port *port =
                container_of(work, struct ehea_port, stats_work.work);
        struct net_device *dev = port->netdev;
        struct rtnl_link_stats64 *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret;

        cb2 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb2) {
                netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
                goto resched;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                netdev_err(dev, "query_ehea_port failed\n");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;

out_herr:
        free_page((unsigned long)cb2);
resched:
        schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
}
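
/*
 * Refill receive queue 1 with freshly allocated skbs.  If the global
 * __EHEA_STOP_XFER flag is set (set elsewhere in the driver, e.g. while
 * memory is being reconfigured), the outstanding count is parked in
 * os_skbs and the refill is retried on a later call.
 */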
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                if (nr_of_wqes > 0)
                        pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                netdev_info(dev, "Unable to allocate enough skb in the array\n");
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        if (nr_rq1a > pr->rq1_skba.len) {
                netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
                return;
        }

        for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i]) {
                        netdev_info(dev, "Not enough memory to allocate skb array\n");
                        break;
                }
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, i - 1);
}
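
/*
 * Common refill path for receive queues 2 and 3: allocate skbs, map their
 * data buffers and post one receive WQE per skb, then ring the doorbell
 * once for the whole batch.
 */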
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                                netdev_info(pr->port->netdev,
                                            "rq%i ran dry - no mem for skb\n",
                                            rq_nr);
                                ret = -ENOMEM;
                        }
                        break;
                }

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_kfree_skb(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE);
}
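
/*
 * Decode the RQ number from a CQE and report whether it signals a fatal
 * receive error.  A TCP checksum error on a frame with no parsed header is
 * not treated as an error here; the checksum for such frames is handled in
 * ehea_fill_skb() below.
 */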
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe,
                                 struct ehea_port_res *pr)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, dev);

        /* The packet was not an IPV4 packet so a complemented checksum was
           calculated. The value is found in the Internet Checksum field. */
        if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~cqe->inet_checksum_value);
        } else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetch(pref);
                prefetch(pref + EHEA_CACHE_LINE);
                prefetch(pref + EHEA_CACHE_LINE * 2);
                prefetch(pref + EHEA_CACHE_LINE * 3);
        }

        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);
        }

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        pr_err("Critical receive error for QP %d. Resetting port.\n",
                               pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                ehea_schedule_port_reset(pr->port);
                return 1;
        }

        return 0;
}
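
/*
 * NAPI receive processing: drain up to @budget completions from RQ1 and
 * hand the resulting skbs to the stack via napi_gro_receive(), then refill
 * all three receive queues by the amount consumed.
 */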
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {
                                /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        netif_info(port, rx_err, dev,
                                                   "LL rq1: skb=NULL\n");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb) {
                                                netdev_err(dev, "Not enough memory to allocate skb\n");
                                                break;
                                        }
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                        cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe, pr);
                        } else if (rq == 2) {
                                /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq2: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq2++;
                        } else {
                                /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq3: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq3++;
                        }

                        processed_bytes += skb->len;

                        if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                                __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

                        napi_gro_receive(&pr->napi, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }

        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}
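
/*
 * To detect send queues that have fallen out of sync with the hardware, a
 * marker WQE tagged SWQE_RESTART_CHECK is posted on each queue; check_sqs()
 * then waits for its completion (which clears sq_restart_flag via
 * reset_sq_restart_flag()) and schedules a port reset if it does not
 * arrive within 100ms.
 */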
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];

                pr->sq_restart_flag = 0;
        }
        wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
        struct ehea_swqe *swqe;
        int swqe_index;
        int i, k;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;

                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
                atomic_dec(&pr->swqe_avail);

                swqe->tx_control |= EHEA_SWQE_PURGE;
                swqe->wr_id = SWQE_RESTART_CHECK;
                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
                swqe->immediate_data_length = 80;

                ehea_post_swqe(pr->qp, swqe);

                ret = wait_event_timeout(port->restart_wq,
                                         pr->sq_restart_flag == 0,
                                         msecs_to_jiffies(100));

                if (!ret) {
                        pr_err("HW/SW queues out of sync\n");
                        ehea_schedule_port_reset(pr->port);
                        return;
                }
        }
}
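
/*
 * Process send completions: free transmitted skbs, credit the reclaimed
 * WQEs back to swqe_avail and re-wake the TX queue once enough send WQEs
 * are available to cross the refill threshold.
 */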
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
                                                pr - &pr->port->port_res[0]);

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();

                if (cqe->wr_id == SWQE_RESTART_CHECK) {
                        pr->sq_restart_flag = 1;
                        swqe_av++;
                        break;
                }

                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        pr_err("Bad send completion status=0x%04X\n",
                               cqe->status);

                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

                        if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                                pr_err("Resetting port\n");
                                ehea_schedule_port_reset(pr->port);
                                break;
                        }
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {
                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_kfree_skb(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        wake_up(&pr->port->swqe_avail_wq);

        return cqe;
}
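
/*
 * NAPI poll handler.  Completions are drained until both CQs are empty;
 * before returning, interrupts are re-armed on both CQs and the queues are
 * re-checked to close the race with completions that arrived after the
 * last drain.
 */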
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                                napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int wqe_index;
        int rx = 0;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while (rx != budget) {
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                rmb();
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!napi_reschedule(napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        int i;

        for (i = 0; i < port->num_def_qps; i++)
                napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        napi_schedule(&pr->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;
        u64 resource_type, aer, aerr;
        int reset_port = 0;

        eqe = ehea_poll_eq(port->qp_eq);
        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
                       eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;

                resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                                &aer, &aerr);

                if (resource_type == EHEA_AER_RESTYPE_QP) {
                        if ((aer & EHEA_AER_RESET_MASK) ||
                            (aerr & EHEA_AERR_RESET_MASK))
                                reset_port = 1;
                } else
                        reset_port = 1; /* Reset in case of CQ or EQ error */

                eqe = ehea_poll_eq(port->qp_eq);
        }

        if (reset_port) {
                pr_err("Resetting port\n");
                ehea_schedule_port_reset(port);
        }

        return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        /* may be called via ehea_neq_tasklet() */
        cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!cb0) {
                pr_err("no mem for cb0\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb4) {
                pr_err("no mem for cb4\n");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        pr_err("Failed sensing port speed\n");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        pr_info("Hypervisor denied setting port speed\n");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        pr_err("Failed setting port speed\n");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        free_page((unsigned long)cb4);
out:
        return ret;
}
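
/*
 * Decode one NEQ event entry and react to it: propagate logical and
 * physical link state changes to the stack, and report adapter or port
 * malfunctions.
 */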
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;
        struct net_device *dev;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);
        if (!port) {
                /* bail out before dereferencing an unknown port */
                pr_err("unknown portnum %x\n", portnum);
                return;
        }
        dev = port->netdev;

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */
                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(dev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        netdev_err(dev, "failed resensing port attributes\n");
                                        break;
                                }

                                netif_info(port, link, dev,
                                           "Logical port up: %dMbps %s Duplex\n",
                                           port->port_speed,
                                           port->full_duplex == 1 ?
                                           "Full" : "Half");

                                netif_carrier_on(dev);
                                netif_wake_queue(dev);
                        }
                } else
                        if (netif_carrier_ok(dev)) {
                                netif_info(port, link, dev,
                                           "Logical port down\n");
                                netif_carrier_off(dev);
                                netif_tx_disable(dev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        netif_info(port, link, dev,
                                   "Physical port up\n");
                        if (prop_carrier_state)
                                netif_carrier_on(dev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        netif_info(port, link, dev,
                                   "Physical port down\n");
                        if (prop_carrier_state)
                                netif_carrier_off(dev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        netdev_info(dev,
                                    "External switch port is primary port\n");
                else
                        netdev_info(dev,
                                    "External switch port is backup port\n");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                netdev_err(dev, "Adapter malfunction\n");
                break;
        case EHEA_EC_PORT_MALFUNC:
                netdev_info(dev, "Port malfunction\n");
                netif_carrier_off(dev);
                netif_tx_disable(dev);
                break;
        default:
                netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
                break;
        }
}
static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        pr_debug("eqe=%p\n", eqe);

        while (eqe) {
                pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                pr_debug("next eqe=%p\n", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;

        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ehea_init_fill_rq1(pr, pr->rq1_skba.len);

        ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;

        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  IRQF_DISABLED, port->int_aff_name, port);
        if (ret) {
                netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
                           port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        netif_info(port, ifup, dev,
                   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
                   port->qp_eq->attr.ist1);

        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          IRQF_DISABLED, pr->int_send_name,
                                          pr);
                if (ret) {
                        netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
                                   i, pr->eq->attr.ist1);
                        goto out_free_req;
                }
                netif_info(port, ifup, dev,
                           "irq_handle 0x%X for function ehea_queue_int %d registered\n",
                           pr->eq->attr.ist1, i);
        }
out:
        return ret;

out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;

                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */
        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                netif_info(port, intr, dev,
                           "free send irq for res %d with handle 0x%X\n",
                           i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        netif_info(port, intr, dev,
                   "associated event interrupt for handle 0x%X freed\n",
                   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        pr_err("Generating SMRS failed\n");
        return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}
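
/*
 * Set up one port resource set: its event queue, send/receive completion
 * queues, queue pair, skb tracking arrays and shared memory regions, and
 * register the NAPI context.  The packet/byte counters are saved across
 * the memset of the structure so they survive a reinitialisation.
 */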
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                              struct port_res_cfg *pr_cfg, int queue_token)
{
        struct ehea_adapter *adapter = port->adapter;
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;
        u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

        tx_bytes = pr->tx_bytes;
        tx_packets = pr->tx_packets;
        rx_bytes = pr->rx_bytes;
        rx_packets = pr->rx_packets;

        memset(pr, 0, sizeof(struct ehea_port_res));

        pr->tx_bytes = tx_bytes;
        pr->tx_packets = tx_packets;
        pr->rx_bytes = rx_bytes;
        pr->rx_packets = rx_packets;

        pr->port = port;

        pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
        if (!pr->eq) {
                pr_err("create_eq failed (eq)\n");
                goto out_free;
        }

        pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->recv_cq) {
                pr_err("create_cq failed (cq_recv)\n");
                goto out_free;
        }

        pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->send_cq) {
                pr_err("create_cq failed (cq_send)\n");
                goto out_free;
        }

        if (netif_msg_ifup(port))
                pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
                        pr->send_cq->attr.act_nr_of_cqes,
                        pr->recv_cq->attr.act_nr_of_cqes);

        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!init_attr) {
                ret = -ENOMEM;
                pr_err("no mem for ehea_qp_init_attr\n");
                goto out_free;
        }

        init_attr->low_lat_rq1 = 1;
        init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
        init_attr->rq_count = 3;
        init_attr->qp_token = queue_token;
        init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
        init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
        init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
        init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
        init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
        init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
        init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
        init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
        init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
        init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
        init_attr->port_nr = port->logical_port_id;
        init_attr->send_cq_handle = pr->send_cq->fw_handle;
        init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
        init_attr->aff_eq_handle = port->qp_eq->fw_handle;

        pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
        if (!pr->qp) {
                pr_err("create_qp failed\n");
                ret = -EIO;
                goto out_free;
        }

        if (netif_msg_ifup(port))
                pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
                        init_attr->qp_nr,
                        init_attr->act_nr_send_wqes,
                        init_attr->act_nr_rwqes_rq1,
                        init_attr->act_nr_rwqes_rq2,
                        init_attr->act_nr_rwqes_rq3);

        pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

        ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
        if (ret)
                goto out_free;

        pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
        if (ehea_gen_smrs(pr) != 0) {
                ret = -EIO;
                goto out_free;
        }

        atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

        kfree(init_attr);

        netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

        ret = 0;
        goto out;

out_free:
        kfree(init_attr);
        vfree(pr->sq_skba.arr);
        vfree(pr->rq1_skba.arr);
        vfree(pr->rq2_skba.arr);
        vfree(pr->rq3_skba.arr);
        ehea_destroy_qp(pr->qp);
        ehea_destroy_cq(pr->send_cq);
        ehea_destroy_cq(pr->recv_cq);
        ehea_destroy_eq(pr->eq);
out:
        return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);
		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);
		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);
		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

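/*
 * A type-2 send WQE carries up to SWQE2_MAX_IMM bytes of the packet
 * inline ("immediate data"); the remainder of the linear data and all
 * page fragments are referenced through scatter/gather entries keyed by
 * the send MR.  For TSO only the Ethernet/IP/TCP headers go inline and
 * the payload is always described by sg entries.
 */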
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

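/*
 * Changing the MAC on a live port is a three-step exchange with the
 * hypervisor: write the new address into CB0, deregister the broadcast
 * filters tied to the old address, then register them again for the
 * new one; the cached BCMC registrations are refreshed on every exit
 * path that may have touched them.
 */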
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else if (!enable) {
		/* Disable ALLMULTI */
		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
		if (!hret)
			port->allmulti = 0;
		else
			netdev_err(dev,
				   "failed disabling IFF_ALLMULTI\n");
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

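/*
 * rx-mode callback: promiscuous mode short-circuits everything,
 * IFF_ALLMULTI replaces the individual filters with a single catch-all
 * registration, and otherwise the old multicast list is dropped and
 * rebuilt entry by entry, falling back to ALLMULTI if the drop fails
 * and merely suggesting it when the adapter's registration limit is
 * exceeded.
 */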
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

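/*
 * xmit_common - fill the checksum-offload fields shared by both WQE
 * types: ip_start/ip_end delimit the IPv4 header, and tcp_offset points
 * at the L4 'check' field itself, which is what the adapter patches
 * when EHEA_SWQE_TCP_CHECKSUM is set.  Non-IPv4 frames only get the
 * CRC and immediate-data flags.
 */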
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

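/*
 * Transmit entry point.  Frames up to SWQE3_MAX_IMM bytes are sent as
 * type-3 WQEs with the whole packet copied inline (the skb can be freed
 * right away in ehea_xmit3); larger frames become type-2 WQEs, stay in
 * sq_skba until their completion arrives, and always request a CQE.
 * The inline path requests a CQE only every sig_comp_iv frames to keep
 * the completion rate down.
 */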
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;

		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
			| EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
			| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

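/*
 * The per-port VLAN filter in CB1 is a bitmap of 4096 VIDs spread over
 * u64 words, most significant bit first: VID v lives in word v / 64
 * under mask 0x8000000000000000 >> (v & 0x3F), e.g. VID 70 -> word 1,
 * mask 0x0200000000000000.  add_vid sets the bit and kill_vid clears
 * it, each through a query/modify round trip to the hypervisor.
 */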
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}

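/*
 * ehea_activate_qp - step a queue pair up to its operational state.
 * The control register is rewritten in three modify calls,
 * INITIALIZED -> ENABLED -> RDY2SND, with a fresh query of the full
 * control block before each step and a final query to confirm the
 * result; any step that does not return H_SUCCESS aborts with -EIO.
 */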
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	/*
	 * NB: this loop starts and ends at def_qps, so the small-RX
	 * configuration above is currently never used.
	 */
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

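/*
 * Port bring-up: create the per-queue resources, program the default
 * QP into the port, register the interrupts, then activate and fill
 * every QP before registering the broadcast MAC.  Each error exit
 * unwinds exactly the steps completed before it.
 */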
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "ehea_port_res_setup failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

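/*
 * During LPAR memory changes the send queues are not torn down:
 * pending WQEs are stamped with EHEA_SWQE_PURGE (ehea_purge_sq) and
 * ehea_flush_sq then waits up to 100 ms per queue for swqe_avail to
 * show that the hardware has drained them, warning if it has not.
 */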
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));
		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

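/*
 * ehea_update_rqs - re-key posted receive WQEs after the memory region
 * has been rebuilt.  RWQEs already posted to RQ2/RQ3 still carry the
 * old l_key and skb addresses, so both rings are walked and rewritten
 * in place before the QP is re-enabled in ehea_restart_qps.
 */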
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;
	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

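/*
 * ehea_rereg_mrs - global quiesce/rebuild/restart across all adapters,
 * entered with __EHEA_STOP_XFER already set by the memory notifier:
 * every active port is stopped (tx disabled, SQ flushed, QPs disabled,
 * SMRs dropped) and the adapter MR unregistered; after the stop bit is
 * cleared a fresh MR is registered and the ports are restarted.
 */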
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO,
						       cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	free_page((unsigned long)cb4);
out:
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

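/*
 * ehea_setup_single_port - allocate and register one logical port:
 * a multiqueue net_device sized for EHEA_MAX_PORT_RES, port attributes
 * sensed from firmware, an OF child device for the sysfs attributes,
 * and the feature/ops wiring, before the netdev itself is registered
 * and jumbo-frame support is probed.  sig_comp_iv, the send-completion
 * interval used in ehea_start_xmit, is set here to sq_entries / 10.
 */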
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
	if (!dev) {
		pr_err("no mem for net_device\n");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO
		| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		| NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	memset(&port->stats, 0, sizeof(struct net_device_stats));
	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}

static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}

void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

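/*
 * Memory hotplug notifier: online and going-offline events both stop
 * all transfers, patch the section busmap, and re-register the kernel
 * MR via ehea_rereg_mrs(); MEM_CANCEL_OFFLINE deliberately falls
 * through to the MEM_ONLINE path so the canceled block is re-added.
 */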
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		/* Readd canceled memory block - fall through */
	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		pr_info("failed registering reboot notifier\n");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		pr_info("failed registering memory remove notifier\n");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		pr_info("failed registering crash handler\n");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	int ret;

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);