
/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

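/*
 * Rebuild the global snapshot of all firmware handles (QPs, CQs, EQs and
 * memory regions) owned by every registered adapter and active port.  The
 * array is built in two passes: first count the handles under the lock,
 * then allocate one flat array and fill it.  Code elsewhere in the driver
 * (not part of this excerpt) walks this array when the hardware has to be
 * quiesced, e.g. on reboot or kexec.
 */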
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

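/*
 * Rebuild the flat array of broadcast/multicast (BCMC) registrations in the
 * same two-pass count-then-fill style as ehea_update_firmware_handles().
 * Each port contributes two broadcast entries (untagged and all-VLAN) plus
 * two entries per multicast address on its list.  Runs under a spinlock
 * with interrupts disabled, hence the GFP_ATOMIC allocation.
 */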
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes   += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes   += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	return &port->stats;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
}

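/*
 * Top up receive queue 1, a power-of-two ring of preallocated skbs.  The
 * index walks backwards under the ring-size mask, so only slots whose skb
 * was consumed are replenished.  Skbs that cannot be allocated right now
 * are remembered in os_skbs ("outstanding" skbs) and retried on the next
 * refill; the doorbell (ehea_update_rq1a) is rung only for the WQEs
 * actually made available to the hardware.
 */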
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

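/*
 * Common refill path for RQ2 and RQ3.  Unlike RQ1, each replenished slot
 * also gets a fresh receive WQE describing the skb's data buffer (mapped
 * through the receive memory region), and the hardware is notified via the
 * RQ2/RQ3 doorbell after an iosync() barrier orders the WQE stores.
 */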
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

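/*
 * The two lookup helpers below pop the skb for a completion out of the
 * ring and, while at it, prefetch the *next* slot's skb header and packet
 * data so they are warm in cache by the time the following completion is
 * processed.  The _ll variant serves the low-latency RQ1, where the WQE
 * index comes straight from the completion queue.
 */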
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

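/*
 * LRO callback: locate the IPv4 and TCP headers of a candidate skb so the
 * inet_lro engine can decide whether to aggregate it.  Returning -1 for
 * anything that is not a complete TCP/IPv4 packet makes LRO pass the skb
 * through unaggregated.
 */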
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
		__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

	if (skb->dev->features & NETIF_F_LRO)
		lro_receive_skb(&pr->lro_mgr, skb, cqe);
	else
		netif_receive_skb(skb);
}

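/*
 * NAPI receive processing.  RQ1 completions carry the packet inline in the
 * CQE (low-latency path), so the payload is copied into a fresh skb; RQ2
 * and RQ3 completions reference preposted skbs looked up by WQE index.
 * After the budget is consumed (or the CQ runs dry), each RQ is refilled
 * by exactly the number of completions it produced.
 */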
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
					cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;
			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (dev->features & NETIF_F_LRO)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

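/*
 * Verify that the hardware send queues are still in sync with the driver
 * by posting a marker WQE (wr_id == SWQE_RESTART_CHECK, PURGE flag set) on
 * every send queue and waiting for its completion to clear sq_restart_flag.
 * If a marker does not complete within 100ms, the queues are declared out
 * of sync and the port is reset.
 */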
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i, k;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		k = 0;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}

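/*
 * Reap send completions: free the skbs of completed SWQE2 transmissions,
 * credit the freed slots back to swqe_avail, and wake the tx queue if it
 * was stopped and enough slots (swqe_refill_th) are available again.  The
 * stopped/available check is repeated under the tx lock to avoid racing
 * with the xmit path.
 */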
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

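/*
 * NAPI poll handler.  Once the rx budget is no longer exhausted, completion
 * interrupts are re-enabled (ehea_reset_cq_ep/_n1) and both queues are
 * polled once more: if new work arrived in the window before the rearm
 * took effect, napi is rescheduled and processing continues, closing the
 * classic interrupt-rearm race.
 */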
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

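/*
 * QP affiliated-error interrupt: drain the event queue, query the error
 * data for each affected QP, and schedule a port reset if the hardware
 * demands one (or if the error belongs to a CQ/EQ rather than a QP).
 */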
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

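/*
 * Query H_PORT_CB0 from the hypervisor and cache the port's MAC address,
 * speed/duplex and number of default QPs in the ehea_port structure.
 * Uses GFP_ATOMIC because it can run from the NEQ tasklet.
 */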
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

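/*
 * Decode one event queue entry from the adapter's notification EQ and
 * react to it: propagate logical and physical link transitions to the
 * stack, and flag adapter or port malfunctions.
 */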
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	/* Check the port before dereferencing it for dev */
	if (!port) {
		pr_err("unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

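/*
 * Register the affiliated-error interrupt for the port and one receive
 * interrupt per default QP.  On failure, already-registered queue IRQs
 * are unwound in reverse order before the error is returned.
 */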
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

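/*
 * Allocate and wire up all per-queue resources for one port: the event
 * queue, send/receive completion queues, the QP itself, the skb tracking
 * arrays for the send queue and all three receive queues, the shared
 * memory regions, and the NAPI context.  The traffic counters are saved
 * across the memset so a port reset does not zero the interface stats.
 */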
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
  1358. kfree(init_attr);
  1359. vfree(pr->sq_skba.arr);
  1360. vfree(pr->rq1_skba.arr);
  1361. vfree(pr->rq2_skba.arr);
  1362. vfree(pr->rq3_skba.arr);
  1363. ehea_destroy_qp(pr->qp);
  1364. ehea_destroy_cq(pr->send_cq);
  1365. ehea_destroy_cq(pr->recv_cq);
  1366. ehea_destroy_eq(pr->eq);
  1367. out:
  1368. return ret;
  1369. }
  1370. static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
  1371. {
  1372. int ret, i;
  1373. if (pr->qp)
  1374. netif_napi_del(&pr->napi);
  1375. ret = ehea_destroy_qp(pr->qp);
  1376. if (!ret) {
  1377. ehea_destroy_cq(pr->send_cq);
  1378. ehea_destroy_cq(pr->recv_cq);
  1379. ehea_destroy_eq(pr->eq);
  1380. for (i = 0; i < pr->rq1_skba.len; i++)
  1381. if (pr->rq1_skba.arr[i])
  1382. dev_kfree_skb(pr->rq1_skba.arr[i]);
  1383. for (i = 0; i < pr->rq2_skba.len; i++)
  1384. if (pr->rq2_skba.arr[i])
  1385. dev_kfree_skb(pr->rq2_skba.arr[i]);
  1386. for (i = 0; i < pr->rq3_skba.len; i++)
  1387. if (pr->rq3_skba.arr[i])
  1388. dev_kfree_skb(pr->rq3_skba.arr[i]);
  1389. for (i = 0; i < pr->sq_skba.len; i++)
  1390. if (pr->sq_skba.arr[i])
  1391. dev_kfree_skb(pr->sq_skba.arr[i]);
  1392. vfree(pr->rq1_skba.arr);
  1393. vfree(pr->rq2_skba.arr);
  1394. vfree(pr->rq3_skba.arr);
  1395. vfree(pr->sq_skba.arr);
  1396. ret = ehea_rem_smrs(pr);
  1397. }
  1398. return ret;
  1399. }
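/*
 * Fill the immediate-data area of a type-2 send WQE.  For TSO frames
 * only the Ethernet/IP/TCP headers are copied; whatever remains of the
 * linear data is referenced through the first scatter-gather entry
 * rather than being copied.
 */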
  1400. static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
  1401. u32 lkey)
  1402. {
  1403. int skb_data_size = skb_headlen(skb);
  1404. u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
  1405. struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
  1406. unsigned int immediate_len = SWQE2_MAX_IMM;
  1407. swqe->descriptors = 0;
  1408. if (skb_is_gso(skb)) {
  1409. swqe->tx_control |= EHEA_SWQE_TSO;
  1410. swqe->mss = skb_shinfo(skb)->gso_size;
  1411. /*
  1412. * For TSO packets we only copy the headers into the
  1413. * immediate area.
  1414. */
  1415. immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
  1416. }
  1417. if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
  1418. skb_copy_from_linear_data(skb, imm_data, immediate_len);
  1419. swqe->immediate_data_length = immediate_len;
  1420. if (skb_data_size > immediate_len) {
  1421. sg1entry->l_key = lkey;
  1422. sg1entry->len = skb_data_size - immediate_len;
  1423. sg1entry->vaddr =
  1424. ehea_map_vaddr(skb->data + immediate_len);
  1425. swqe->descriptors++;
  1426. }
  1427. } else {
  1428. skb_copy_from_linear_data(skb, imm_data, skb_data_size);
  1429. swqe->immediate_data_length = skb_data_size;
  1430. }
  1431. }
  1432. static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
  1433. struct ehea_swqe *swqe, u32 lkey)
  1434. {
  1435. struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
  1436. skb_frag_t *frag;
  1437. int nfrags, sg1entry_contains_frag_data, i;
  1438. nfrags = skb_shinfo(skb)->nr_frags;
  1439. sg1entry = &swqe->u.immdata_desc.sg_entry;
  1440. sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
  1441. sg1entry_contains_frag_data = 0;
  1442. write_swqe2_immediate(skb, swqe, lkey);
  1443. /* write descriptors */
  1444. if (nfrags > 0) {
  1445. if (swqe->descriptors == 0) {
  1446. /* sg1entry not yet used */
  1447. frag = &skb_shinfo(skb)->frags[0];
  1448. /* copy sg1entry data */
  1449. sg1entry->l_key = lkey;
  1450. sg1entry->len = frag->size;
  1451. sg1entry->vaddr =
  1452. ehea_map_vaddr(skb_frag_address(frag));
  1453. swqe->descriptors++;
  1454. sg1entry_contains_frag_data = 1;
  1455. }
  1456. for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
  1457. frag = &skb_shinfo(skb)->frags[i];
  1458. sgentry = &sg_list[i - sg1entry_contains_frag_data];
  1459. sgentry->l_key = lkey;
  1460. sgentry->len = frag->size;
  1461. sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
  1462. swqe->descriptors++;
  1463. }
  1464. }
  1465. }
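/*
 * (De)register the port's broadcast MAC with the hypervisor, depending
 * on hcallid (H_REG_BCMC/H_DEREG_BCMC): once for untagged frames and
 * once for all VLAN IDs.
 */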
  1466. static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
  1467. {
  1468. int ret = 0;
  1469. u64 hret;
  1470. u8 reg_type;
  1471. /* De/Register untagged packets */
  1472. reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
  1473. hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
  1474. port->logical_port_id,
  1475. reg_type, port->mac_addr, 0, hcallid);
  1476. if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
  1479. ret = -EIO;
  1480. goto out_herr;
  1481. }
  1482. /* De/Register VLAN packets */
  1483. reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
  1484. hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
  1485. port->logical_port_id,
  1486. reg_type, port->mac_addr, 0, hcallid);
  1487. if (hret != H_SUCCESS) {
  1488. pr_err("%sregistering bc address failed (vlan)\n",
  1489. hcallid == H_REG_BCMC ? "" : "de");
  1490. ret = -EIO;
  1491. }
  1492. out_herr:
  1493. return ret;
  1494. }
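/*
 * Change the port MAC address: program the new address through
 * H_PORT_CB0 and, if the port is up, swap the broadcast registrations
 * in the hypervisor (deregister the old MAC, register the new one).
 */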
  1495. static int ehea_set_mac_addr(struct net_device *dev, void *sa)
  1496. {
  1497. struct ehea_port *port = netdev_priv(dev);
  1498. struct sockaddr *mac_addr = sa;
  1499. struct hcp_ehea_port_cb0 *cb0;
  1500. int ret;
  1501. u64 hret;
  1502. if (!is_valid_ether_addr(mac_addr->sa_data)) {
  1503. ret = -EADDRNOTAVAIL;
  1504. goto out;
  1505. }
  1506. cb0 = (void *)get_zeroed_page(GFP_KERNEL);
  1507. if (!cb0) {
  1508. pr_err("no mem for cb0\n");
  1509. ret = -ENOMEM;
  1510. goto out;
  1511. }
  1512. memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
  1513. cb0->port_mac_addr = cb0->port_mac_addr >> 16;
  1514. hret = ehea_h_modify_ehea_port(port->adapter->handle,
  1515. port->logical_port_id, H_PORT_CB0,
  1516. EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
  1517. if (hret != H_SUCCESS) {
  1518. ret = -EIO;
  1519. goto out_free;
  1520. }
  1521. memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
  1522. /* Deregister old MAC in pHYP */
  1523. if (port->state == EHEA_PORT_UP) {
  1524. ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
  1525. if (ret)
  1526. goto out_upregs;
  1527. }
  1528. port->mac_addr = cb0->port_mac_addr << 16;
  1529. /* Register new MAC in pHYP */
  1530. if (port->state == EHEA_PORT_UP) {
  1531. ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
  1532. if (ret)
  1533. goto out_upregs;
  1534. }
  1535. ret = 0;
  1536. out_upregs:
  1537. ehea_update_bcmc_registrations();
  1538. out_free:
  1539. free_page((unsigned long)cb0);
  1540. out:
  1541. return ret;
  1542. }
  1543. static void ehea_promiscuous_error(u64 hret, int enable)
  1544. {
  1545. if (hret == H_AUTHORITY)
  1546. pr_info("Hypervisor denied %sabling promiscuous mode\n",
  1547. enable == 1 ? "en" : "dis");
  1548. else
  1549. pr_err("failed %sabling promiscuous mode\n",
  1550. enable == 1 ? "en" : "dis");
  1551. }
  1552. static void ehea_promiscuous(struct net_device *dev, int enable)
  1553. {
  1554. struct ehea_port *port = netdev_priv(dev);
  1555. struct hcp_ehea_port_cb7 *cb7;
  1556. u64 hret;
  1557. if (enable == port->promisc)
  1558. return;
  1559. cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
  1560. if (!cb7) {
  1561. pr_err("no mem for cb7\n");
  1562. goto out;
  1563. }
  1564. /* Modify Pxs_DUCQPN in CB7 */
  1565. cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
  1566. hret = ehea_h_modify_ehea_port(port->adapter->handle,
  1567. port->logical_port_id,
  1568. H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
  1569. if (hret) {
  1570. ehea_promiscuous_error(hret, enable);
  1571. goto out;
  1572. }
  1573. port->promisc = enable;
  1574. out:
  1575. free_page((unsigned long)cb7);
  1576. }
  1577. static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
  1578. u32 hcallid)
  1579. {
  1580. u64 hret;
  1581. u8 reg_type;
  1582. reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
  1583. | EHEA_BCMC_UNTAGGED;
  1584. hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
  1585. port->logical_port_id,
  1586. reg_type, mc_mac_addr, 0, hcallid);
  1587. if (hret)
  1588. goto out;
  1589. reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
  1590. | EHEA_BCMC_VLANID_ALL;
  1591. hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
  1592. port->logical_port_id,
  1593. reg_type, mc_mac_addr, 0, hcallid);
  1594. out:
  1595. return hret;
  1596. }
  1597. static int ehea_drop_multicast_list(struct net_device *dev)
  1598. {
  1599. struct ehea_port *port = netdev_priv(dev);
  1600. struct ehea_mc_list *mc_entry = port->mc_list;
  1601. struct list_head *pos;
  1602. struct list_head *temp;
  1603. int ret = 0;
  1604. u64 hret;
  1605. list_for_each_safe(pos, temp, &(port->mc_list->list)) {
  1606. mc_entry = list_entry(pos, struct ehea_mc_list, list);
  1607. hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
  1608. H_DEREG_BCMC);
  1609. if (hret) {
  1610. pr_err("failed deregistering mcast MAC\n");
  1611. ret = -EIO;
  1612. }
  1613. list_del(pos);
  1614. kfree(mc_entry);
  1615. }
  1616. return ret;
  1617. }
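/*
 * Toggle ALLMULTI by (de)registering a multicast entry with MAC 0,
 * which presumably acts as a catch-all in the hypervisor's BCMC table.
 */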
  1618. static void ehea_allmulti(struct net_device *dev, int enable)
  1619. {
  1620. struct ehea_port *port = netdev_priv(dev);
  1621. u64 hret;
  1622. if (!port->allmulti) {
  1623. if (enable) {
  1624. /* Enable ALLMULTI */
  1625. ehea_drop_multicast_list(dev);
  1626. hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
  1627. if (!hret)
  1628. port->allmulti = 1;
  1629. else
  1630. netdev_err(dev,
  1631. "failed enabling IFF_ALLMULTI\n");
  1632. }
  1633. } else
  1634. if (!enable) {
  1635. /* Disable ALLMULTI */
  1636. hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
  1637. if (!hret)
  1638. port->allmulti = 0;
  1639. else
  1640. netdev_err(dev,
  1641. "failed disabling IFF_ALLMULTI\n");
  1642. }
  1643. }
  1644. static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
  1645. {
  1646. struct ehea_mc_list *ehea_mcl_entry;
  1647. u64 hret;
  1648. ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
  1649. if (!ehea_mcl_entry) {
  1650. pr_err("no mem for mcl_entry\n");
  1651. return;
  1652. }
  1653. INIT_LIST_HEAD(&ehea_mcl_entry->list);
  1654. memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
  1655. hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
  1656. H_REG_BCMC);
  1657. if (!hret)
  1658. list_add(&ehea_mcl_entry->list, &port->mc_list->list);
  1659. else {
  1660. pr_err("failed registering mcast MAC\n");
  1661. kfree(ehea_mcl_entry);
  1662. }
  1663. }
  1664. static void ehea_set_multicast_list(struct net_device *dev)
  1665. {
  1666. struct ehea_port *port = netdev_priv(dev);
  1667. struct netdev_hw_addr *ha;
  1668. int ret;
	if (dev->flags & IFF_PROMISC) {
  1670. ehea_promiscuous(dev, 1);
  1671. return;
  1672. }
  1673. ehea_promiscuous(dev, 0);
  1674. if (dev->flags & IFF_ALLMULTI) {
  1675. ehea_allmulti(dev, 1);
  1676. goto out;
  1677. }
  1678. ehea_allmulti(dev, 0);
  1679. if (!netdev_mc_empty(dev)) {
  1680. ret = ehea_drop_multicast_list(dev);
  1681. if (ret) {
  1682. /* Dropping the current multicast list failed.
  1683. * Enabling ALL_MULTI is the best we can do.
  1684. */
  1685. ehea_allmulti(dev, 1);
  1686. }
  1687. if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
  1688. pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
  1689. port->adapter->max_mc_mac);
  1690. goto out;
  1691. }
  1692. netdev_for_each_mc_addr(ha, dev)
  1693. ehea_add_multicast_entry(port, ha->addr);
  1694. }
  1695. out:
  1696. ehea_update_bcmc_registrations();
  1697. }
  1698. static int ehea_change_mtu(struct net_device *dev, int new_mtu)
  1699. {
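	/* 68 is the minimum IPv4 MTU (RFC 791) */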
  1700. if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
  1701. return -EINVAL;
  1702. dev->mtu = new_mtu;
  1703. return 0;
  1704. }
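/*
 * Fill the checksum-offload fields common to both transmit paths.
 * Note that the UDP case reuses the EHEA_SWQE_TCP_CHECKSUM flag, with
 * the checksum offset pointing into the UDP header instead.
 */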
  1705. static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
  1706. {
  1707. swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
  1708. if (skb->protocol != htons(ETH_P_IP))
  1709. return;
  1710. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1711. swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
  1712. swqe->ip_start = skb_network_offset(skb);
  1713. swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
  1714. switch (ip_hdr(skb)->protocol) {
  1715. case IPPROTO_UDP:
  1716. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1717. swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
  1718. swqe->tcp_offset = swqe->ip_end + 1 +
  1719. offsetof(struct udphdr, check);
  1720. swqe->tcp_end = skb->len - 1;
  1721. break;
  1722. case IPPROTO_TCP:
  1723. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1724. swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
  1725. swqe->tcp_offset = swqe->ip_end + 1 +
  1726. offsetof(struct tcphdr, check);
  1727. swqe->tcp_end = skb->len - 1;
  1728. break;
  1729. }
  1730. }
  1731. static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
  1732. struct ehea_swqe *swqe, u32 lkey)
  1733. {
  1734. swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
  1735. xmit_common(skb, swqe);
  1736. write_swqe2_data(skb, dev, swqe, lkey);
  1737. }
  1738. static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
  1739. struct ehea_swqe *swqe)
  1740. {
  1741. int nfrags = skb_shinfo(skb)->nr_frags;
  1742. u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
  1743. skb_frag_t *frag;
  1744. int i;
  1745. xmit_common(skb, swqe);
  1746. if (nfrags == 0) {
  1747. skb_copy_from_linear_data(skb, imm_data, skb->len);
  1748. } else {
  1749. skb_copy_from_linear_data(skb, imm_data,
  1750. skb_headlen(skb));
  1751. imm_data += skb_headlen(skb);
  1752. /* ... then copy data from the fragments */
  1753. for (i = 0; i < nfrags; i++) {
  1754. frag = &skb_shinfo(skb)->frags[i];
  1755. memcpy(imm_data, skb_frag_address(frag), frag->size);
  1756. imm_data += frag->size;
  1757. }
  1758. }
  1759. swqe->immediate_data_length = skb->len;
  1760. dev_kfree_skb(skb);
  1761. }
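/*
 * Main transmit path.  Frames up to SWQE3_MAX_IMM bytes are copied
 * entirely into the WQE as immediate data (type 3) and signal a
 * completion only every sig_comp_iv sends; larger frames use
 * scatter-gather descriptors (type 2), park the skb in sq_skba until
 * the completion arrives, and always request a signalled completion.
 */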
  1762. static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1763. {
  1764. struct ehea_port *port = netdev_priv(dev);
  1765. struct ehea_swqe *swqe;
  1766. u32 lkey;
  1767. int swqe_index;
  1768. struct ehea_port_res *pr;
  1769. struct netdev_queue *txq;
  1770. pr = &port->port_res[skb_get_queue_mapping(skb)];
  1771. txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
  1772. swqe = ehea_get_swqe(pr->qp, &swqe_index);
  1773. memset(swqe, 0, SWQE_HEADER_SIZE);
  1774. atomic_dec(&pr->swqe_avail);
  1775. if (vlan_tx_tag_present(skb)) {
  1776. swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
  1777. swqe->vlan_tag = vlan_tx_tag_get(skb);
  1778. }
  1779. pr->tx_packets++;
  1780. pr->tx_bytes += skb->len;
  1781. if (skb->len <= SWQE3_MAX_IMM) {
  1782. u32 sig_iv = port->sig_comp_iv;
  1783. u32 swqe_num = pr->swqe_id_counter;
  1784. ehea_xmit3(skb, dev, swqe);
  1785. swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
  1786. | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
  1787. if (pr->swqe_ll_count >= (sig_iv - 1)) {
  1788. swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
  1789. sig_iv);
  1790. swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
  1791. pr->swqe_ll_count = 0;
  1792. } else
  1793. pr->swqe_ll_count += 1;
  1794. } else {
  1795. swqe->wr_id =
  1796. EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
  1797. | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
  1798. | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
  1799. | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
  1800. pr->sq_skba.arr[pr->sq_skba.index] = skb;
  1801. pr->sq_skba.index++;
  1802. pr->sq_skba.index &= (pr->sq_skba.len - 1);
  1803. lkey = pr->send_mr.lkey;
  1804. ehea_xmit2(skb, dev, swqe, lkey);
  1805. swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
  1806. }
  1807. pr->swqe_id_counter += 1;
  1808. netif_info(port, tx_queued, dev,
  1809. "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
  1810. if (netif_msg_tx_queued(port))
  1811. ehea_dump(swqe, 512, "swqe");
  1812. if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
  1813. netif_tx_stop_queue(txq);
  1814. swqe->tx_control |= EHEA_SWQE_PURGE;
  1815. }
  1816. ehea_post_swqe(pr->qp, swqe);
  1817. if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
  1818. pr->p_stats.queue_stopped++;
  1819. netif_tx_stop_queue(txq);
  1820. }
  1821. return NETDEV_TX_OK;
  1822. }
  1823. static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
  1824. {
  1825. struct ehea_port *port = netdev_priv(dev);
  1826. struct ehea_adapter *adapter = port->adapter;
  1827. struct hcp_ehea_port_cb1 *cb1;
  1828. int index;
  1829. u64 hret;
  1830. cb1 = (void *)get_zeroed_page(GFP_KERNEL);
  1831. if (!cb1) {
  1832. pr_err("no mem for cb1\n");
  1833. goto out;
  1834. }
  1835. hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
  1836. H_PORT_CB1, H_PORT_CB1_ALL, cb1);
  1837. if (hret != H_SUCCESS) {
  1838. pr_err("query_ehea_port failed\n");
  1839. goto out;
  1840. }
  1841. index = (vid / 64);
  1842. cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
  1843. hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
  1844. H_PORT_CB1, H_PORT_CB1_ALL, cb1);
  1845. if (hret != H_SUCCESS)
  1846. pr_err("modify_ehea_port failed\n");
  1847. out:
  1848. free_page((unsigned long)cb1);
  1849. return;
  1850. }
  1851. static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
  1852. {
  1853. struct ehea_port *port = netdev_priv(dev);
  1854. struct ehea_adapter *adapter = port->adapter;
  1855. struct hcp_ehea_port_cb1 *cb1;
  1856. int index;
  1857. u64 hret;
  1858. cb1 = (void *)get_zeroed_page(GFP_KERNEL);
  1859. if (!cb1) {
  1860. pr_err("no mem for cb1\n");
  1861. goto out;
  1862. }
  1863. hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
  1864. H_PORT_CB1, H_PORT_CB1_ALL, cb1);
  1865. if (hret != H_SUCCESS) {
  1866. pr_err("query_ehea_port failed\n");
  1867. goto out;
  1868. }
  1869. index = (vid / 64);
  1870. cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
  1871. hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
  1872. H_PORT_CB1, H_PORT_CB1_ALL, cb1);
  1873. if (hret != H_SUCCESS)
  1874. pr_err("modify_ehea_port failed\n");
  1875. out:
  1876. free_page((unsigned long)cb1);
  1877. }
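/*
 * Walk the QP through its firmware states (INITIALIZED, ENABLED,
 * ready-to-send), querying the control block before each transition.
 */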
  1878. int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
  1879. {
  1880. int ret = -EIO;
  1881. u64 hret;
  1882. u16 dummy16 = 0;
  1883. u64 dummy64 = 0;
  1884. struct hcp_modify_qp_cb0 *cb0;
  1885. cb0 = (void *)get_zeroed_page(GFP_KERNEL);
  1886. if (!cb0) {
  1887. ret = -ENOMEM;
  1888. goto out;
  1889. }
  1890. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1891. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
  1892. if (hret != H_SUCCESS) {
  1893. pr_err("query_ehea_qp failed (1)\n");
  1894. goto out;
  1895. }
  1896. cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
  1897. hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1898. EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
  1899. &dummy64, &dummy64, &dummy16, &dummy16);
  1900. if (hret != H_SUCCESS) {
  1901. pr_err("modify_ehea_qp failed (1)\n");
  1902. goto out;
  1903. }
  1904. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1905. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
  1906. if (hret != H_SUCCESS) {
  1907. pr_err("query_ehea_qp failed (2)\n");
  1908. goto out;
  1909. }
  1910. cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
  1911. hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1912. EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
  1913. &dummy64, &dummy64, &dummy16, &dummy16);
  1914. if (hret != H_SUCCESS) {
  1915. pr_err("modify_ehea_qp failed (2)\n");
  1916. goto out;
  1917. }
  1918. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1919. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
  1920. if (hret != H_SUCCESS) {
  1921. pr_err("query_ehea_qp failed (3)\n");
  1922. goto out;
  1923. }
  1924. cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
  1925. hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1926. EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
  1927. &dummy64, &dummy64, &dummy16, &dummy16);
  1928. if (hret != H_SUCCESS) {
  1929. pr_err("modify_ehea_qp failed (3)\n");
  1930. goto out;
  1931. }
  1932. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  1933. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
  1934. if (hret != H_SUCCESS) {
  1935. pr_err("query_ehea_qp failed (4)\n");
  1936. goto out;
  1937. }
  1938. ret = 0;
  1939. out:
  1940. free_page((unsigned long)cb0);
  1941. return ret;
  1942. }
  1943. static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
  1944. {
  1945. int ret, i;
	struct port_res_cfg pr_cfg;
  1947. enum ehea_eq_type eq_type = EHEA_EQ;
  1948. port->qp_eq = ehea_create_eq(port->adapter, eq_type,
  1949. EHEA_MAX_ENTRIES_EQ, 1);
  1950. if (!port->qp_eq) {
  1951. ret = -EINVAL;
  1952. pr_err("ehea_create_eq failed (qp_eq)\n");
  1953. goto out_kill_eq;
  1954. }
  1955. pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
  1956. pr_cfg.max_entries_scq = sq_entries * 2;
  1957. pr_cfg.max_entries_sq = sq_entries;
  1958. pr_cfg.max_entries_rq1 = rq1_entries;
  1959. pr_cfg.max_entries_rq2 = rq2_entries;
  1960. pr_cfg.max_entries_rq3 = rq3_entries;
  1967. for (i = 0; i < def_qps; i++) {
  1968. ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
  1969. if (ret)
  1970. goto out_clean_pr;
  1971. }
  1978. return 0;
  1979. out_clean_pr:
  1980. while (--i >= 0)
  1981. ehea_clean_portres(port, &port->port_res[i]);
  1982. out_kill_eq:
  1983. ehea_destroy_eq(port->qp_eq);
  1984. return ret;
  1985. }
  1986. static int ehea_clean_all_portres(struct ehea_port *port)
  1987. {
  1988. int ret = 0;
  1989. int i;
  1990. for (i = 0; i < port->num_def_qps; i++)
  1991. ret |= ehea_clean_portres(port, &port->port_res[i]);
  1992. ret |= ehea_destroy_eq(port->qp_eq);
  1993. return ret;
  1994. }
  1995. static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
  1996. {
  1997. if (adapter->active_ports)
  1998. return;
  1999. ehea_rem_mr(&adapter->mr);
  2000. }
  2001. static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
  2002. {
  2003. if (adapter->active_ports)
  2004. return 0;
  2005. return ehea_reg_kernel_mr(adapter, &adapter->mr);
  2006. }
  2007. static int ehea_up(struct net_device *dev)
  2008. {
  2009. int ret, i;
  2010. struct ehea_port *port = netdev_priv(dev);
  2011. if (port->state == EHEA_PORT_UP)
  2012. return 0;
  2013. ret = ehea_port_res_setup(port, port->num_def_qps);
  2014. if (ret) {
  2015. netdev_err(dev, "port_res_failed\n");
  2016. goto out;
  2017. }
  2018. /* Set default QP for this port */
  2019. ret = ehea_configure_port(port);
  2020. if (ret) {
  2021. netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
  2022. goto out_clean_pr;
  2023. }
  2024. ret = ehea_reg_interrupts(dev);
  2025. if (ret) {
  2026. netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
  2027. goto out_clean_pr;
  2028. }
  2029. for (i = 0; i < port->num_def_qps; i++) {
  2030. ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
  2031. if (ret) {
  2032. netdev_err(dev, "activate_qp failed\n");
  2033. goto out_free_irqs;
  2034. }
  2035. }
  2036. for (i = 0; i < port->num_def_qps; i++) {
  2037. ret = ehea_fill_port_res(&port->port_res[i]);
  2038. if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
  2040. goto out_free_irqs;
  2041. }
  2042. }
  2043. ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
  2044. if (ret) {
  2045. ret = -EIO;
  2046. goto out_free_irqs;
  2047. }
  2048. port->state = EHEA_PORT_UP;
  2049. ret = 0;
  2050. goto out;
  2051. out_free_irqs:
  2052. ehea_free_interrupts(dev);
  2053. out_clean_pr:
  2054. ehea_clean_all_portres(port);
  2055. out:
  2056. if (ret)
  2057. netdev_info(dev, "Failed starting. ret=%i\n", ret);
  2058. ehea_update_bcmc_registrations();
  2059. ehea_update_firmware_handles();
  2060. return ret;
  2061. }
  2062. static void port_napi_disable(struct ehea_port *port)
  2063. {
  2064. int i;
  2065. for (i = 0; i < port->num_def_qps; i++)
  2066. napi_disable(&port->port_res[i].napi);
  2067. }
  2068. static void port_napi_enable(struct ehea_port *port)
  2069. {
  2070. int i;
  2071. for (i = 0; i < port->num_def_qps; i++)
  2072. napi_enable(&port->port_res[i].napi);
  2073. }
  2074. static int ehea_open(struct net_device *dev)
  2075. {
  2076. int ret;
  2077. struct ehea_port *port = netdev_priv(dev);
  2078. mutex_lock(&port->port_lock);
  2079. netif_info(port, ifup, dev, "enabling port\n");
  2080. ret = ehea_up(dev);
  2081. if (!ret) {
  2082. port_napi_enable(port);
  2083. netif_tx_start_all_queues(dev);
  2084. }
  2085. mutex_unlock(&port->port_lock);
  2086. schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
  2087. return ret;
  2088. }
  2089. static int ehea_down(struct net_device *dev)
  2090. {
  2091. int ret;
  2092. struct ehea_port *port = netdev_priv(dev);
  2093. if (port->state == EHEA_PORT_DOWN)
  2094. return 0;
  2095. ehea_drop_multicast_list(dev);
  2096. ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
  2097. ehea_free_interrupts(dev);
  2098. port->state = EHEA_PORT_DOWN;
  2099. ehea_update_bcmc_registrations();
  2100. ret = ehea_clean_all_portres(port);
  2101. if (ret)
  2102. netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
  2103. ehea_update_firmware_handles();
  2104. return ret;
  2105. }
  2106. static int ehea_stop(struct net_device *dev)
  2107. {
  2108. int ret;
  2109. struct ehea_port *port = netdev_priv(dev);
  2110. netif_info(port, ifdown, dev, "disabling port\n");
  2111. set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
  2112. cancel_work_sync(&port->reset_task);
  2113. cancel_delayed_work_sync(&port->stats_work);
  2114. mutex_lock(&port->port_lock);
  2115. netif_tx_stop_all_queues(dev);
  2116. port_napi_disable(port);
  2117. ret = ehea_down(dev);
  2118. mutex_unlock(&port->port_lock);
  2119. clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
  2120. return ret;
  2121. }
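/*
 * Flag every send WQE with EHEA_SWQE_PURGE, presumably so outstanding
 * work drains without being transmitted; used while transfers are
 * stopped, e.g. around memory re-registration.
 */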
  2122. static void ehea_purge_sq(struct ehea_qp *orig_qp)
  2123. {
  2124. struct ehea_qp qp = *orig_qp;
  2125. struct ehea_qp_init_attr *init_attr = &qp.init_attr;
  2126. struct ehea_swqe *swqe;
  2127. int wqe_index;
  2128. int i;
  2129. for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
  2130. swqe = ehea_get_swqe(&qp, &wqe_index);
  2131. swqe->tx_control |= EHEA_SWQE_PURGE;
  2132. }
  2133. }
  2134. static void ehea_flush_sq(struct ehea_port *port)
  2135. {
  2136. int i;
  2137. for (i = 0; i < port->num_def_qps; i++) {
  2138. struct ehea_port_res *pr = &port->port_res[i];
  2139. int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
  2140. int ret;
  2141. ret = wait_event_timeout(port->swqe_avail_wq,
  2142. atomic_read(&pr->swqe_avail) >= swqe_max,
  2143. msecs_to_jiffies(100));
  2144. if (!ret) {
  2145. pr_err("WARNING: sq not flushed completely\n");
  2146. break;
  2147. }
  2148. }
  2149. }
  2150. int ehea_stop_qps(struct net_device *dev)
  2151. {
  2152. struct ehea_port *port = netdev_priv(dev);
  2153. struct ehea_adapter *adapter = port->adapter;
  2154. struct hcp_modify_qp_cb0 *cb0;
  2155. int ret = -EIO;
  2156. int dret;
  2157. int i;
  2158. u64 hret;
  2159. u64 dummy64 = 0;
  2160. u16 dummy16 = 0;
  2161. cb0 = (void *)get_zeroed_page(GFP_KERNEL);
  2162. if (!cb0) {
  2163. ret = -ENOMEM;
  2164. goto out;
  2165. }
  2166. for (i = 0; i < (port->num_def_qps); i++) {
  2167. struct ehea_port_res *pr = &port->port_res[i];
  2168. struct ehea_qp *qp = pr->qp;
  2169. /* Purge send queue */
  2170. ehea_purge_sq(qp);
  2171. /* Disable queue pair */
  2172. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  2173. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
  2174. cb0);
  2175. if (hret != H_SUCCESS) {
  2176. pr_err("query_ehea_qp failed (1)\n");
  2177. goto out;
  2178. }
  2179. cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
  2180. cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
  2181. hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
  2182. EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
  2183. 1), cb0, &dummy64,
  2184. &dummy64, &dummy16, &dummy16);
  2185. if (hret != H_SUCCESS) {
  2186. pr_err("modify_ehea_qp failed (1)\n");
  2187. goto out;
  2188. }
  2189. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  2190. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
  2191. cb0);
  2192. if (hret != H_SUCCESS) {
  2193. pr_err("query_ehea_qp failed (2)\n");
  2194. goto out;
  2195. }
  2196. /* deregister shared memory regions */
  2197. dret = ehea_rem_smrs(pr);
  2198. if (dret) {
  2199. pr_err("unreg shared memory region failed\n");
  2200. goto out;
  2201. }
  2202. }
  2203. ret = 0;
  2204. out:
  2205. free_page((unsigned long)cb0);
  2206. return ret;
  2207. }
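/*
 * After fresh shared memory regions have been generated, rewrite the
 * l_key and buffer address of every posted RQ2/RQ3 receive WQE so it
 * references the new mapping.
 */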
  2208. void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
  2209. {
  2210. struct ehea_qp qp = *orig_qp;
  2211. struct ehea_qp_init_attr *init_attr = &qp.init_attr;
  2212. struct ehea_rwqe *rwqe;
  2213. struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
  2214. struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
  2215. struct sk_buff *skb;
  2216. u32 lkey = pr->recv_mr.lkey;
  2217. int i;
  2218. int index;
  2219. for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
  2220. rwqe = ehea_get_next_rwqe(&qp, 2);
  2221. rwqe->sg_list[0].l_key = lkey;
  2222. index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
  2223. skb = skba_rq2[index];
  2224. if (skb)
  2225. rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
  2226. }
  2227. for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
  2228. rwqe = ehea_get_next_rwqe(&qp, 3);
  2229. rwqe->sg_list[0].l_key = lkey;
  2230. index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
  2231. skb = skba_rq3[index];
  2232. if (skb)
  2233. rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
  2234. }
  2235. }
  2236. int ehea_restart_qps(struct net_device *dev)
  2237. {
  2238. struct ehea_port *port = netdev_priv(dev);
  2239. struct ehea_adapter *adapter = port->adapter;
  2240. int ret = 0;
  2241. int i;
  2242. struct hcp_modify_qp_cb0 *cb0;
  2243. u64 hret;
  2244. u64 dummy64 = 0;
  2245. u16 dummy16 = 0;
  2246. cb0 = (void *)get_zeroed_page(GFP_KERNEL);
  2247. if (!cb0) {
  2248. ret = -ENOMEM;
  2249. goto out;
  2250. }
  2251. for (i = 0; i < (port->num_def_qps); i++) {
  2252. struct ehea_port_res *pr = &port->port_res[i];
  2253. struct ehea_qp *qp = pr->qp;
  2254. ret = ehea_gen_smrs(pr);
  2255. if (ret) {
  2256. netdev_err(dev, "creation of shared memory regions failed\n");
  2257. goto out;
  2258. }
  2259. ehea_update_rqs(qp, pr);
  2260. /* Enable queue pair */
  2261. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  2262. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
  2263. cb0);
  2264. if (hret != H_SUCCESS) {
  2265. netdev_err(dev, "query_ehea_qp failed (1)\n");
  2266. goto out;
  2267. }
  2268. cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
  2269. cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
  2270. hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
  2271. EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
  2272. 1), cb0, &dummy64,
  2273. &dummy64, &dummy16, &dummy16);
  2274. if (hret != H_SUCCESS) {
  2275. netdev_err(dev, "modify_ehea_qp failed (1)\n");
  2276. goto out;
  2277. }
  2278. hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
  2279. EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
  2280. cb0);
  2281. if (hret != H_SUCCESS) {
  2282. netdev_err(dev, "query_ehea_qp failed (2)\n");
  2283. goto out;
  2284. }
  2285. /* refill entire queue */
  2286. ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
  2287. ehea_refill_rq2(pr, 0);
  2288. ehea_refill_rq3(pr, 0);
  2289. }
  2290. out:
  2291. free_page((unsigned long)cb0);
  2292. return ret;
  2293. }
  2294. static void ehea_reset_port(struct work_struct *work)
  2295. {
  2296. int ret;
  2297. struct ehea_port *port =
  2298. container_of(work, struct ehea_port, reset_task);
  2299. struct net_device *dev = port->netdev;
  2300. mutex_lock(&dlpar_mem_lock);
  2301. port->resets++;
  2302. mutex_lock(&port->port_lock);
  2303. netif_tx_disable(dev);
  2304. port_napi_disable(port);
  2305. ehea_down(dev);
  2306. ret = ehea_up(dev);
  2307. if (ret)
  2308. goto out;
  2309. ehea_set_multicast_list(dev);
  2310. netif_info(port, timer, dev, "reset successful\n");
  2311. port_napi_enable(port);
  2312. netif_tx_wake_all_queues(dev);
  2313. out:
  2314. mutex_unlock(&port->port_lock);
  2315. mutex_unlock(&dlpar_mem_lock);
  2316. }
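/*
 * The LPAR memory layout changed (DLPAR): quiesce all active ports,
 * drop each adapter's kernel memory region, register a fresh one and
 * restart the queue pairs.
 */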
  2317. static void ehea_rereg_mrs(void)
  2318. {
  2319. int ret, i;
  2320. struct ehea_adapter *adapter;
  2321. pr_info("LPAR memory changed - re-initializing driver\n");
  2322. list_for_each_entry(adapter, &adapter_list, list)
  2323. if (adapter->active_ports) {
  2324. /* Shutdown all ports */
  2325. for (i = 0; i < EHEA_MAX_PORTS; i++) {
  2326. struct ehea_port *port = adapter->port[i];
  2327. struct net_device *dev;
  2328. if (!port)
  2329. continue;
  2330. dev = port->netdev;
  2331. if (dev->flags & IFF_UP) {
  2332. mutex_lock(&port->port_lock);
  2333. netif_tx_disable(dev);
  2334. ehea_flush_sq(port);
  2335. ret = ehea_stop_qps(dev);
  2336. if (ret) {
  2337. mutex_unlock(&port->port_lock);
  2338. goto out;
  2339. }
  2340. port_napi_disable(port);
  2341. mutex_unlock(&port->port_lock);
  2342. }
  2343. reset_sq_restart_flag(port);
  2344. }
  2345. /* Unregister old memory region */
  2346. ret = ehea_rem_mr(&adapter->mr);
  2347. if (ret) {
  2348. pr_err("unregister MR failed - driver inoperable!\n");
  2349. goto out;
  2350. }
  2351. }
  2352. clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
  2353. list_for_each_entry(adapter, &adapter_list, list)
  2354. if (adapter->active_ports) {
  2355. /* Register new memory region */
  2356. ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
  2357. if (ret) {
  2358. pr_err("register MR failed - driver inoperable!\n");
  2359. goto out;
  2360. }
  2361. /* Restart all ports */
  2362. for (i = 0; i < EHEA_MAX_PORTS; i++) {
  2363. struct ehea_port *port = adapter->port[i];
  2364. if (port) {
  2365. struct net_device *dev = port->netdev;
  2366. if (dev->flags & IFF_UP) {
  2367. mutex_lock(&port->port_lock);
  2368. ret = ehea_restart_qps(dev);
  2369. if (!ret) {
  2370. check_sqs(port);
  2371. port_napi_enable(port);
  2372. netif_tx_wake_all_queues(dev);
  2373. } else {
  2374. netdev_err(dev, "Unable to restart QPS\n");
  2375. }
  2376. mutex_unlock(&port->port_lock);
  2377. }
  2378. }
  2379. }
  2380. }
  2381. pr_info("re-initializing driver complete\n");
  2382. out:
  2383. return;
  2384. }
  2385. static void ehea_tx_watchdog(struct net_device *dev)
  2386. {
  2387. struct ehea_port *port = netdev_priv(dev);
  2388. if (netif_carrier_ok(dev) &&
  2389. !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
  2390. ehea_schedule_port_reset(port);
  2391. }
  2392. int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
  2393. {
  2394. struct hcp_query_ehea *cb;
  2395. u64 hret;
  2396. int ret;
  2397. cb = (void *)get_zeroed_page(GFP_KERNEL);
  2398. if (!cb) {
  2399. ret = -ENOMEM;
  2400. goto out;
  2401. }
  2402. hret = ehea_h_query_ehea(adapter->handle, cb);
  2403. if (hret != H_SUCCESS) {
  2404. ret = -EIO;
  2405. goto out_herr;
  2406. }
  2407. adapter->max_mc_mac = cb->max_mc_mac - 1;
  2408. ret = 0;
  2409. out_herr:
  2410. free_page((unsigned long)cb);
  2411. out:
  2412. return ret;
  2413. }
  2414. int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
  2415. {
  2416. struct hcp_ehea_port_cb4 *cb4;
  2417. u64 hret;
  2418. int ret = 0;
  2419. *jumbo = 0;
	/* (Try to) enable jumbo frames */
  2421. cb4 = (void *)get_zeroed_page(GFP_KERNEL);
  2422. if (!cb4) {
  2423. pr_err("no mem for cb4\n");
  2424. ret = -ENOMEM;
  2425. goto out;
  2426. } else {
  2427. hret = ehea_h_query_ehea_port(port->adapter->handle,
  2428. port->logical_port_id,
  2429. H_PORT_CB4,
  2430. H_PORT_CB4_JUMBO, cb4);
  2431. if (hret == H_SUCCESS) {
  2432. if (cb4->jumbo_frame)
  2433. *jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(
						port->adapter->handle,
						port->logical_port_id,
						H_PORT_CB4,
						H_PORT_CB4_JUMBO, cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
  2446. } else
  2447. ret = -EINVAL;
  2448. free_page((unsigned long)cb4);
  2449. }
  2450. out:
  2451. return ret;
  2452. }
  2453. static ssize_t ehea_show_port_id(struct device *dev,
  2454. struct device_attribute *attr, char *buf)
  2455. {
  2456. struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
  2457. return sprintf(buf, "%d", port->logical_port_id);
  2458. }
  2459. static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
  2460. NULL);
  2461. static void __devinit logical_port_release(struct device *dev)
  2462. {
  2463. struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
  2464. of_node_put(port->ofdev.dev.of_node);
  2465. }
  2466. static struct device *ehea_register_port(struct ehea_port *port,
  2467. struct device_node *dn)
  2468. {
  2469. int ret;
  2470. port->ofdev.dev.of_node = of_node_get(dn);
  2471. port->ofdev.dev.parent = &port->adapter->ofdev->dev;
  2472. port->ofdev.dev.bus = &ibmebus_bus_type;
  2473. dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
  2474. port->ofdev.dev.release = logical_port_release;
  2475. ret = of_device_register(&port->ofdev);
  2476. if (ret) {
  2477. pr_err("failed to register device. ret=%d\n", ret);
  2478. goto out;
  2479. }
  2480. ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
  2481. if (ret) {
  2482. pr_err("failed to register attributes, ret=%d\n", ret);
  2483. goto out_unreg_of_dev;
  2484. }
  2485. return &port->ofdev.dev;
  2486. out_unreg_of_dev:
  2487. of_device_unregister(&port->ofdev);
  2488. out:
  2489. return NULL;
  2490. }
  2491. static void ehea_unregister_port(struct ehea_port *port)
  2492. {
  2493. device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
  2494. of_device_unregister(&port->ofdev);
  2495. }
  2496. static const struct net_device_ops ehea_netdev_ops = {
  2497. .ndo_open = ehea_open,
  2498. .ndo_stop = ehea_stop,
  2499. .ndo_start_xmit = ehea_start_xmit,
  2500. #ifdef CONFIG_NET_POLL_CONTROLLER
  2501. .ndo_poll_controller = ehea_netpoll,
  2502. #endif
  2503. .ndo_get_stats = ehea_get_stats,
  2504. .ndo_set_mac_address = ehea_set_mac_addr,
  2505. .ndo_validate_addr = eth_validate_addr,
  2506. .ndo_set_rx_mode = ehea_set_multicast_list,
  2507. .ndo_change_mtu = ehea_change_mtu,
  2508. .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
  2509. .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
  2510. .ndo_tx_timeout = ehea_tx_watchdog,
  2511. };
  2512. struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
  2513. u32 logical_port_id,
  2514. struct device_node *dn)
  2515. {
  2516. int ret;
  2517. struct net_device *dev;
  2518. struct ehea_port *port;
  2519. struct device *port_dev;
  2520. int jumbo;
  2521. /* allocate memory for the port structures */
  2522. dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
  2523. if (!dev) {
  2524. pr_err("no mem for net_device\n");
  2525. ret = -ENOMEM;
  2526. goto out_err;
  2527. }
  2528. port = netdev_priv(dev);
  2529. mutex_init(&port->port_lock);
  2530. port->state = EHEA_PORT_DOWN;
  2531. port->sig_comp_iv = sq_entries / 10;
  2532. port->adapter = adapter;
  2533. port->netdev = dev;
  2534. port->logical_port_id = logical_port_id;
  2535. port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
  2536. port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
  2537. if (!port->mc_list) {
  2538. ret = -ENOMEM;
  2539. goto out_free_ethdev;
  2540. }
  2541. INIT_LIST_HEAD(&port->mc_list->list);
  2542. ret = ehea_sense_port_attr(port);
  2543. if (ret)
  2544. goto out_free_mc_list;
  2545. netif_set_real_num_rx_queues(dev, port->num_def_qps);
  2546. netif_set_real_num_tx_queues(dev, port->num_def_qps);
  2547. port_dev = ehea_register_port(port, dn);
  2548. if (!port_dev)
  2549. goto out_free_mc_list;
  2550. SET_NETDEV_DEV(dev, port_dev);
  2551. /* initialize net_device structure */
  2552. memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
  2553. dev->netdev_ops = &ehea_netdev_ops;
  2554. ehea_set_ethtool_ops(dev);
  2555. dev->hw_features = NETIF_F_SG | NETIF_F_TSO
  2556. | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
  2557. dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
  2558. | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
  2559. | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
  2560. | NETIF_F_RXCSUM;
  2561. dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
  2562. NETIF_F_IP_CSUM;
  2563. dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
  2564. if (use_lro)
  2565. dev->features |= NETIF_F_LRO;
  2566. INIT_WORK(&port->reset_task, ehea_reset_port);
  2567. INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
  2568. init_waitqueue_head(&port->swqe_avail_wq);
  2569. init_waitqueue_head(&port->restart_wq);
  2570. memset(&port->stats, 0, sizeof(struct net_device_stats));
  2571. ret = register_netdev(dev);
  2572. if (ret) {
  2573. pr_err("register_netdev failed. ret=%d\n", ret);
  2574. goto out_unreg_port;
  2575. }
  2576. port->lro_max_aggr = lro_max_aggr;
  2577. ret = ehea_get_jumboframe_status(port, &jumbo);
  2578. if (ret)
  2579. netdev_err(dev, "failed determining jumbo frame status\n");
  2580. netdev_info(dev, "Jumbo frames are %sabled\n",
  2581. jumbo == 1 ? "en" : "dis");
  2582. adapter->active_ports++;
  2583. return port;
  2584. out_unreg_port:
  2585. ehea_unregister_port(port);
  2586. out_free_mc_list:
  2587. kfree(port->mc_list);
  2588. out_free_ethdev:
  2589. free_netdev(dev);
  2590. out_err:
  2591. pr_err("setting up logical port with id=%d failed, ret=%d\n",
  2592. logical_port_id, ret);
  2593. return NULL;
  2594. }
  2595. static void ehea_shutdown_single_port(struct ehea_port *port)
  2596. {
  2597. struct ehea_adapter *adapter = port->adapter;
  2598. cancel_work_sync(&port->reset_task);
  2599. cancel_delayed_work_sync(&port->stats_work);
  2600. unregister_netdev(port->netdev);
  2601. ehea_unregister_port(port);
  2602. kfree(port->mc_list);
  2603. free_netdev(port->netdev);
  2604. adapter->active_ports--;
  2605. }
  2606. static int ehea_setup_ports(struct ehea_adapter *adapter)
  2607. {
  2608. struct device_node *lhea_dn;
  2609. struct device_node *eth_dn = NULL;
  2610. const u32 *dn_log_port_id;
  2611. int i = 0;
  2612. lhea_dn = adapter->ofdev->dev.of_node;
  2613. while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
  2614. dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
  2615. NULL);
  2616. if (!dn_log_port_id) {
  2617. pr_err("bad device node: eth_dn name=%s\n",
  2618. eth_dn->full_name);
  2619. continue;
  2620. }
  2621. if (ehea_add_adapter_mr(adapter)) {
  2622. pr_err("creating MR failed\n");
  2623. of_node_put(eth_dn);
  2624. return -EIO;
  2625. }
  2626. adapter->port[i] = ehea_setup_single_port(adapter,
  2627. *dn_log_port_id,
  2628. eth_dn);
  2629. if (adapter->port[i])
  2630. netdev_info(adapter->port[i]->netdev,
  2631. "logical port id #%d\n", *dn_log_port_id);
  2632. else
  2633. ehea_remove_adapter_mr(adapter);
  2634. i++;
  2635. }
  2636. return 0;
  2637. }
  2638. static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
  2639. u32 logical_port_id)
  2640. {
  2641. struct device_node *lhea_dn;
  2642. struct device_node *eth_dn = NULL;
  2643. const u32 *dn_log_port_id;
  2644. lhea_dn = adapter->ofdev->dev.of_node;
  2645. while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
  2646. dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
  2647. NULL);
  2648. if (dn_log_port_id)
  2649. if (*dn_log_port_id == logical_port_id)
  2650. return eth_dn;
  2651. }
  2652. return NULL;
  2653. }
  2654. static ssize_t ehea_probe_port(struct device *dev,
  2655. struct device_attribute *attr,
  2656. const char *buf, size_t count)
  2657. {
  2658. struct ehea_adapter *adapter = dev_get_drvdata(dev);
  2659. struct ehea_port *port;
  2660. struct device_node *eth_dn = NULL;
  2661. int i;
  2662. u32 logical_port_id;
	sscanf(buf, "%u", &logical_port_id);
  2664. port = ehea_get_port(adapter, logical_port_id);
  2665. if (port) {
  2666. netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
  2667. logical_port_id);
  2668. return -EINVAL;
  2669. }
  2670. eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
  2671. if (!eth_dn) {
  2672. pr_info("no logical port with id %d found\n", logical_port_id);
  2673. return -EINVAL;
  2674. }
  2675. if (ehea_add_adapter_mr(adapter)) {
  2676. pr_err("creating MR failed\n");
  2677. return -EIO;
  2678. }
  2679. port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
  2680. of_node_put(eth_dn);
  2681. if (port) {
  2682. for (i = 0; i < EHEA_MAX_PORTS; i++)
  2683. if (!adapter->port[i]) {
  2684. adapter->port[i] = port;
  2685. break;
  2686. }
  2687. netdev_info(port->netdev, "added: (logical port id=%d)\n",
  2688. logical_port_id);
  2689. } else {
  2690. ehea_remove_adapter_mr(adapter);
  2691. return -EIO;
  2692. }
  2693. return (ssize_t) count;
  2694. }
  2695. static ssize_t ehea_remove_port(struct device *dev,
  2696. struct device_attribute *attr,
  2697. const char *buf, size_t count)
  2698. {
  2699. struct ehea_adapter *adapter = dev_get_drvdata(dev);
  2700. struct ehea_port *port;
  2701. int i;
  2702. u32 logical_port_id;
	sscanf(buf, "%u", &logical_port_id);
  2704. port = ehea_get_port(adapter, logical_port_id);
  2705. if (port) {
  2706. netdev_info(port->netdev, "removed: (logical port id=%d)\n",
  2707. logical_port_id);
  2708. ehea_shutdown_single_port(port);
  2709. for (i = 0; i < EHEA_MAX_PORTS; i++)
  2710. if (adapter->port[i] == port) {
  2711. adapter->port[i] = NULL;
  2712. break;
  2713. }
  2714. } else {
  2715. pr_err("removing port with logical port id=%d failed. port not configured.\n",
  2716. logical_port_id);
  2717. return -EINVAL;
  2718. }
  2719. ehea_remove_adapter_mr(adapter);
  2720. return (ssize_t) count;
  2721. }
  2722. static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
  2723. static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
  2724. int ehea_create_device_sysfs(struct platform_device *dev)
  2725. {
  2726. int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
  2727. if (ret)
  2728. goto out;
  2729. ret = device_create_file(&dev->dev, &dev_attr_remove_port);
  2730. out:
  2731. return ret;
  2732. }
  2733. void ehea_remove_device_sysfs(struct platform_device *dev)
  2734. {
  2735. device_remove_file(&dev->dev, &dev_attr_probe_port);
  2736. device_remove_file(&dev->dev, &dev_attr_remove_port);
  2737. }
  2738. static int __devinit ehea_probe_adapter(struct platform_device *dev,
  2739. const struct of_device_id *id)
  2740. {
  2741. struct ehea_adapter *adapter;
  2742. const u64 *adapter_handle;
  2743. int ret;
  2744. if (!dev || !dev->dev.of_node) {
  2745. pr_err("Invalid ibmebus device probed\n");
  2746. return -EINVAL;
  2747. }
  2748. adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
  2749. if (!adapter) {
  2750. ret = -ENOMEM;
  2751. dev_err(&dev->dev, "no mem for ehea_adapter\n");
  2752. goto out;
  2753. }
  2754. list_add(&adapter->list, &adapter_list);
  2755. adapter->ofdev = dev;
  2756. adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
  2757. NULL);
  2758. if (adapter_handle)
  2759. adapter->handle = *adapter_handle;
  2760. if (!adapter->handle) {
  2761. dev_err(&dev->dev, "failed getting handle for adapter"
  2762. " '%s'\n", dev->dev.of_node->full_name);
  2763. ret = -ENODEV;
  2764. goto out_free_ad;
  2765. }
  2766. adapter->pd = EHEA_PD_ID;
  2767. dev_set_drvdata(&dev->dev, adapter);
  2768. /* initialize adapter and ports */
  2769. /* get adapter properties */
  2770. ret = ehea_sense_adapter_attr(adapter);
  2771. if (ret) {
  2772. dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
  2773. goto out_free_ad;
  2774. }
  2775. adapter->neq = ehea_create_eq(adapter,
  2776. EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
  2777. if (!adapter->neq) {
  2778. ret = -EIO;
  2779. dev_err(&dev->dev, "NEQ creation failed\n");
  2780. goto out_free_ad;
  2781. }
  2782. tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
  2783. (unsigned long)adapter);
  2784. ret = ibmebus_request_irq(adapter->neq->attr.ist1,
  2785. ehea_interrupt_neq, IRQF_DISABLED,
  2786. "ehea_neq", adapter);
  2787. if (ret) {
  2788. dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
  2789. goto out_kill_eq;
  2790. }
  2791. ret = ehea_create_device_sysfs(dev);
  2792. if (ret)
  2793. goto out_free_irq;
  2794. ret = ehea_setup_ports(adapter);
  2795. if (ret) {
  2796. dev_err(&dev->dev, "setup_ports failed\n");
  2797. goto out_rem_dev_sysfs;
  2798. }
  2799. ret = 0;
  2800. goto out;
  2801. out_rem_dev_sysfs:
  2802. ehea_remove_device_sysfs(dev);
  2803. out_free_irq:
  2804. ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
  2805. out_kill_eq:
  2806. ehea_destroy_eq(adapter->neq);
  2807. out_free_ad:
  2808. list_del(&adapter->list);
  2809. kfree(adapter);
  2810. out:
  2811. ehea_update_firmware_handles();
  2812. return ret;
  2813. }
  2814. static int __devexit ehea_remove(struct platform_device *dev)
  2815. {
  2816. struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
  2817. int i;
  2818. for (i = 0; i < EHEA_MAX_PORTS; i++)
  2819. if (adapter->port[i]) {
  2820. ehea_shutdown_single_port(adapter->port[i]);
  2821. adapter->port[i] = NULL;
  2822. }
  2823. ehea_remove_device_sysfs(dev);
  2824. ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
  2825. tasklet_kill(&adapter->neq_tasklet);
  2826. ehea_destroy_eq(adapter->neq);
  2827. ehea_remove_adapter_mr(adapter);
  2828. list_del(&adapter->list);
  2829. kfree(adapter);
  2830. ehea_update_firmware_handles();
  2831. return 0;
  2832. }
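/*
 * Crash/kdump path: forcibly free all known firmware handles and drop
 * all broadcast/multicast registrations, presumably so the capture
 * kernel finds the adapter in a clean state.
 */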
  2833. void ehea_crash_handler(void)
  2834. {
  2835. int i;
  2836. if (ehea_fw_handles.arr)
  2837. for (i = 0; i < ehea_fw_handles.num_entries; i++)
  2838. ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
  2839. ehea_fw_handles.arr[i].fwh,
  2840. FORCE_FREE);
  2841. if (ehea_bcmc_regs.arr)
  2842. for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
  2843. ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
  2844. ehea_bcmc_regs.arr[i].port_id,
  2845. ehea_bcmc_regs.arr[i].reg_type,
  2846. ehea_bcmc_regs.arr[i].macaddr,
  2847. 0, H_DEREG_BCMC);
  2848. }
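/*
 * Memory hotplug notifier: keep the driver's section bitmap in sync
 * and re-register the memory regions whenever LPAR memory comes or
 * goes; transfers are stopped (__EHEA_STOP_XFER) for the duration.
 */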
  2849. static int ehea_mem_notifier(struct notifier_block *nb,
  2850. unsigned long action, void *data)
  2851. {
  2852. int ret = NOTIFY_BAD;
  2853. struct memory_notify *arg = data;
  2854. mutex_lock(&dlpar_mem_lock);
  2855. switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled\n");
		/* Re-add the canceled memory block; fall through */
	case MEM_ONLINE:
		pr_info("memory is going online\n");
  2861. set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
  2862. if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
  2863. goto out_unlock;
  2864. ehea_rereg_mrs();
  2865. break;
  2866. case MEM_GOING_OFFLINE:
		pr_info("memory is going offline\n");
  2868. set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
  2869. if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
  2870. goto out_unlock;
  2871. ehea_rereg_mrs();
  2872. break;
  2873. default:
  2874. break;
  2875. }
  2876. ehea_update_firmware_handles();
  2877. ret = NOTIFY_OK;
  2878. out_unlock:
  2879. mutex_unlock(&dlpar_mem_lock);
  2880. return ret;
  2881. }
  2882. static struct notifier_block ehea_mem_nb = {
  2883. .notifier_call = ehea_mem_notifier,
  2884. };
  2885. static int ehea_reboot_notifier(struct notifier_block *nb,
  2886. unsigned long action, void *unused)
  2887. {
  2888. if (action == SYS_RESTART) {
  2889. pr_info("Reboot: freeing all eHEA resources\n");
  2890. ibmebus_unregister_driver(&ehea_driver);
  2891. }
  2892. return NOTIFY_DONE;
  2893. }
  2894. static struct notifier_block ehea_reboot_nb = {
  2895. .notifier_call = ehea_reboot_notifier,
  2896. };
  2897. static int check_module_parm(void)
  2898. {
  2899. int ret = 0;
  2900. if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
  2901. (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
  2902. pr_info("Bad parameter: rq1_entries\n");
  2903. ret = -EINVAL;
  2904. }
  2905. if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
  2906. (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
  2907. pr_info("Bad parameter: rq2_entries\n");
  2908. ret = -EINVAL;
  2909. }
  2910. if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
  2911. (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
  2912. pr_info("Bad parameter: rq3_entries\n");
  2913. ret = -EINVAL;
  2914. }
  2915. if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
  2916. (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
  2917. pr_info("Bad parameter: sq_entries\n");
  2918. ret = -EINVAL;
  2919. }
  2920. return ret;
  2921. }
  2922. static ssize_t ehea_show_capabilities(struct device_driver *drv,
  2923. char *buf)
  2924. {
  2925. return sprintf(buf, "%d", EHEA_CAPABILITIES);
  2926. }
  2927. static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
  2928. ehea_show_capabilities, NULL);
  2929. int __init ehea_module_init(void)
  2930. {
  2931. int ret;
  2932. pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
  2933. memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
  2934. memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
  2935. mutex_init(&ehea_fw_handles.lock);
  2936. spin_lock_init(&ehea_bcmc_regs.lock);
  2937. ret = check_module_parm();
  2938. if (ret)
  2939. goto out;
  2940. ret = ehea_create_busmap();
  2941. if (ret)
  2942. goto out;
  2943. ret = register_reboot_notifier(&ehea_reboot_nb);
  2944. if (ret)
  2945. pr_info("failed registering reboot notifier\n");
  2946. ret = register_memory_notifier(&ehea_mem_nb);
  2947. if (ret)
  2948. pr_info("failed registering memory remove notifier\n");
  2949. ret = crash_shutdown_register(ehea_crash_handler);
  2950. if (ret)
  2951. pr_info("failed registering crash handler\n");
  2952. ret = ibmebus_register_driver(&ehea_driver);
  2953. if (ret) {
  2954. pr_err("failed registering eHEA device driver on ebus\n");
  2955. goto out2;
  2956. }
  2957. ret = driver_create_file(&ehea_driver.driver,
  2958. &driver_attr_capabilities);
  2959. if (ret) {
  2960. pr_err("failed to register capabilities attribute, ret=%d\n",
  2961. ret);
  2962. goto out3;
  2963. }
  2964. return ret;
  2965. out3:
  2966. ibmebus_unregister_driver(&ehea_driver);
  2967. out2:
  2968. unregister_memory_notifier(&ehea_mem_nb);
  2969. unregister_reboot_notifier(&ehea_reboot_nb);
  2970. crash_shutdown_unregister(ehea_crash_handler);
  2971. out:
  2972. return ret;
  2973. }
  2974. static void __exit ehea_module_exit(void)
  2975. {
  2976. int ret;
  2977. driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
  2978. ibmebus_unregister_driver(&ehea_driver);
  2979. unregister_reboot_notifier(&ehea_reboot_nb);
  2980. ret = crash_shutdown_unregister(ehea_crash_handler);
  2981. if (ret)
  2982. pr_info("failed unregistering crash handler\n");
  2983. unregister_memory_notifier(&ehea_mem_nb);
  2984. kfree(ehea_fw_handles.arr);
  2985. kfree(ehea_bcmc_regs.arr);
  2986. ehea_destroy_busmap();
  2987. }
  2988. module_init(ehea_module_init);
  2989. module_exit(ehea_module_exit);