/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME "%s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
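
/*
 * Note: statistics come from two places. Byte, multicast and error
 * counters are fetched from the hypervisor (query control block CB2),
 * while the per-queue rx_packets counts are accumulated by the driver
 * itself in ehea_poll(). The function below merges both views into the
 * cached net_device_stats.
 */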
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}
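
/*
 * Receive path overview (derived from the refill and poll code below):
 * each port resource owns three receive queues. RQ1 holds small
 * "low latency" buffers of EHEA_L_PKT_SIZE whose payload is copied
 * straight out of the CQE, RQ2 holds medium EHEA_RQ2_PKT_SIZE buffers,
 * and RQ3 holds EHEA_MAX_PACKET_SIZE buffers for jumbo frames. All skb
 * rings are power-of-two sized, so wrap-around is done by masking with
 * (len - 1) instead of a modulo.
 */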
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int i;

	if (!nr_of_wqes)
		return;

	for (i = 0; i < nr_of_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
	}

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i);
}
static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (!fill_wqes)
		return ret;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = (u64)skb->data;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
	}

	q_skba->index = index;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, i);
	else
		ehea_update_rq3a(pr->qp, i);

	return ret;
}
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}
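
/*
 * The two lookup helpers below also prefetch the skb that will most
 * likely be needed on the next completion (slot index + 1) to hide
 * cache-miss latency on the hot receive path.
 */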
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (netif_msg_rx_err(pr->port)) {
		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(cqe, sizeof(*cqe), "CQE");
	}

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		ehea_error("Critical receive error. Resetting port.");
		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
		return 1;
	}

	return 0;
}
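
/*
 * NAPI poll function. It drains up to my_quota completions from
 * receive queue 1, dispatches each skb according to the RQ it actually
 * arrived on, refills the three receive queues with what was consumed,
 * and only then re-enables receive interrupts. The intreq check
 * (true every 16th invocation) appears intended to force an occasional
 * interrupt-driven restart instead of endless rescheduling.
 */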
static int ehea_poll(struct net_device *dev, int *budget)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr = &port->port_res[0];
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;
	my_quota = min(*budget, dev->quota);
	my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);

	/* rq0 is low latency RQ */
	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((my_quota > 0) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		my_quota--;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {	/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");
					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				memcpy(skb->data, ((char *)cqe) + 64,
				       cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {	/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {	/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				vlan_hwaccel_receive_skb(skb, port->vgrp,
							 cqe->vlan_tag);
			else
				netif_receive_skb(skb);
		} else {	/* Error occurred */
			pr->p_state.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	dev->quota -= processed;
	*budget -= processed;

	pr->p_state.ehea_poll += 1;
	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF);

	if (!cqe || intreq) {
		netif_rx_complete(dev);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
		if (!cqe || intreq)
			return 0;
		if (!netif_rx_reschedule(dev, my_quota))
			return 0;
	}
	return 1;
}
void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
{
	struct sk_buff *skb;
	int index, max_index_mask, i;

	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	max_index_mask = pr->sq_skba.len - 1;
	for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
		skb = pr->sq_skba.arr[index];
		if (likely(skb)) {
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		} else {
			ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
				   cqe->wr_id, i, index);
		}
		index--;
		index &= max_index_mask;
	}
}
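
/*
 * Send completions are handled in a tasklet rather than in the hard
 * IRQ handler. The tasklet below frees up to MAX_SENDCOMP_QUOTA sent
 * skbs per run, credits the freed WQEs back to swqe_avail, and wakes
 * the netif queue once enough send WQEs (swqe_refill_th) are available
 * again. If completions remain after the quota, it reschedules itself.
 */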
#define MAX_SENDCOMP_QUOTA 400
void ehea_send_irq_tasklet(unsigned long data)
{
	struct ehea_port_res *pr = (struct ehea_port_res *)data;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = MAX_SENDCOMP_QUOTA;
	int cqe_counter = 0;
	int swqe_av = 0;
	unsigned long flags;

	do {
		cqe = ehea_poll_cq(send_cq);
		if (!cqe) {
			ehea_reset_cq_ep(send_cq);
			ehea_reset_cq_n1(send_cq);
			cqe = ehea_poll_cq(send_cq);
			if (!cqe)
				break;
		}
		cqe_counter++;
		rmb();
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			queue_work(pr->port->adapter->ehea_wq,
				   &pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE))
			free_sent_skbs(cqe, pr);

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;
	} while (quota > 0);

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);
	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	if (unlikely(cqe))
		tasklet_hi_schedule(&pr->send_comp_task);
}
static irqreturn_t ehea_send_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	tasklet_hi_schedule(&pr->send_comp_task);
	return IRQ_HANDLED;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	struct ehea_port *port = pr->port;
	netif_rx_schedule(port->netdev);
	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);
	ehea_debug("eqe=%p", eqe);
	while (eqe) {
		ehea_debug("*eqe=%lx", *(u64 *)eqe);
		/* Extract the QP token from the current entry before
		 * polling the next one; the next poll may return NULL. */
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		eqe = ehea_poll_eq(port->qp_eq);
		ehea_debug("next eqe=%p", eqe);
	}
	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i]->logical_port_id == logical_port)
			return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);	/* May be called via */
	if (!cb0) {				/* ehea_neq_tasklet() */
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;

	/* Number of default QPs */
	port->num_def_qps = cb0->num_default_qps;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	if (port->num_def_qps >= EHEA_NUM_TX_QP)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed. Either"
				  " this partition is not authorized to set "
				  "port speed or another partition has modified"
				  " port speed first.");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	netif_carrier_on(port->netdev);
	kfree(cb4);
out:
	return ret;
}
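
/*
 * Asynchronous adapter events arrive on the notification EQ as 64-bit
 * entries. ehea_parse_eqe() decodes the event code and port number
 * with EHEA_BMASK_GET and reacts to port state changes and adapter or
 * port malfunctions; ehea_neq_tasklet() further below drains the queue
 * and re-arms it via ehea_h_reset_events().
 */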
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
		} else {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
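
/*
 * Interrupt layout: one receive interrupt per default QP, one send
 * interrupt per QP (default plus additional TX QPs), and a single
 * "affiliated event" interrupt per port. The error paths below unwind
 * the registrations in reverse order.
 */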
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-recv%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
					  ehea_recv_irq_handler,
					  SA_INTERRUPT, pr->int_recv_name, pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_recv_int:"
				   "port_res_nr:%d, ist=%X", i,
				   pr->recv_eq->attr.ist1);
			goto out_free_seq;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_recv_int "
				  "%d registered", pr->recv_eq->attr.ist1, i);
	}

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  SA_INTERRUPT, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-send%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1,
					  ehea_send_irq_handler,
					  SA_INTERRUPT, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_send "
				   "port_res_nr:%d, ist=%X", i,
				   pr->send_eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_send_int "
				  "%d registered", pr->send_eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].send_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
out_free_qpeq:
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
out_free_seq:
	while (--i >= 0) {
		u32 ist = port->port_res[i].recv_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
	goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->send_eq->attr.ist1);
	}

	/* receive */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free recv irq for res %d with handle 0x%X",
				  i, pr->recv_eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_def_qps; i++)
		cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}
static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	u64 hret;
	struct ehea_adapter *adapter = pr->port->adapter;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->send_mr);
	if (hret != H_SUCCESS)
		goto out;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->recv_mr);
	if (hret != H_SUCCESS)
		goto out_freeres;

	return 0;

out_freeres:
	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS)
		ehea_error("failed freeing SMR");
out:
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	struct ehea_adapter *adapter = pr->port->adapter;
	int ret = 0;
	u64 hret;

	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing send SMR for pr=%p", pr);
	}

	hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing recv SMR for pr=%p", pr);
	}

	return ret;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
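
/*
 * Port resource setup order: event queues first, then the completion
 * queues that report into them, then the QP bound to both CQs, then
 * the skb bookkeeping arrays (one slot per WQE) and finally the shared
 * memory regions. The out_free path below runs even for partially
 * built resources and appears to rely on the destroy/vfree helpers
 * tolerating NULL arguments.
 */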
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;

	spin_lock_init(&pr->send_lock);
	spin_lock_init(&pr->recv_lock);
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->recv_eq) {
		ehea_error("create_eq failed (recv_eq)");
		goto out_free;
	}

	pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->send_eq) {
		ehea_error("create_eq failed (send_eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->recv_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->send_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet,
		     (unsigned long)pr);
	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
	kfree(init_attr);
	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->send_eq);
	ehea_destroy_eq(pr->recv_eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->send_eq);
		ehea_destroy_eq(pr->recv_eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data));
	swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
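
/*
 * SWQE type 2 carries a small immediate-data area plus scatter-gather
 * entries. For TSO frames only the eth/ip/tcp headers go into the
 * immediate area (presumably so the hardware can replicate them per
 * segment); for everything else up to SWQE2_MAX_IMM bytes of linear
 * data are copied inline and the remainder is referenced via sg1entry.
 */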
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;
	u64 tmp_addr;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			tmp_addr = (u64)(skb->data + headersize);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u64 tmp_addr;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, SWQE2_MAX_IMM);
		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else {
		memcpy(imm_data, skb->data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;
	u64 tmp_addr;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;

			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sgentry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (untagged)");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (vlan)");
		ret = -EIO;
	}
out_herr:
	return ret;
}
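
/*
 * Changing the MAC is a three-step handshake with the hypervisor:
 * write the new address into CB0 via ehea_h_modify_ehea_port(),
 * deregister the broadcast entries for the old address (H_DEREG_BCMC),
 * then register them again for the new one (H_REG_BCMC).
 */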
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_free;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	ehea_info("Hypervisor denied %sabling promiscuous mode.%s",
		  enable == 1 ? "en" : "dis",
		  hret != H_AUTHORITY ? "" : " Another partition owning a "
		  "logical port on the same physical port might have altered "
		  "promiscuous mode first.");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else if (!enable) {
		/* Disable ALLMULTI */
		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
		if (!hret)
			port->allmulti = 0;
		else
			ehea_error("failed disabling IFF_ALLMULTI");
	}
}

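/*
 * Register one multicast MAC with the hypervisor and, on success, keep
 * it on the port's multicast list so it can be dropped later.
 */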
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}

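/*
 * set_multicast_list hook: map IFF_PROMISC/IFF_ALLMULTI to the eHEA
 * mechanisms above, then mirror the kernel's multicast list into the
 * hypervisor, falling back to ALLMULTI when the old list cannot be
 * dropped or the adapter's registration limit is exceeded.
 */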
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		return;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		for (i = 0, k_mcl_entry = dev->mc_list;
		     i < dev->mc_count;
		     i++, k_mcl_entry = k_mcl_entry->next)
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
	}
out:
	return;
}

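/*
 * 68 bytes is the smallest datagram every IPv4 module must handle
 * unfragmented (RFC 791), hence the lower MTU bound below.
 */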
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

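/*
 * Build a type 2 send WQE: the payload is referenced via descriptors,
 * and IPv4 checksum offload is requested except for UDP fragments,
 * whose checksum must be left untouched.
 */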
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (skb->nh.iph->protocol == IPPROTO_UDP) {
			if ((skb->nh.iph->frag_off & IP_MF) ||
			    (skb->nh.iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);

		} else if (skb->nh.iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

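/*
 * Build a type 3 send WQE: the whole frame (linear part, then page
 * fragments) is copied into the WQE as immediate data, so the skb can
 * be freed right away.
 */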
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (skb->nh.iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
			if ((skb->nh.iph->frag_off & IP_MF) ||
			    (skb->nh.iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}

	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		memcpy(imm_data, skb->data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		memcpy(imm_data, skb->data, skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

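/*
 * hard_start_xmit hook: small frames go out as type 3 WQEs (immediate
 * data), larger ones as type 2 (descriptors). A signalled completion
 * is requested only every few WQEs so completions are processed in
 * batches; the queue is stopped when the send queue is nearly full.
 */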
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr = &port->port_res[0];

	spin_lock(&pr->xmit_lock);

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
			| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);

		if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      EHEA_SIG_IV_LONG);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_count = 0;
		} else
			pr->swqe_count += 1;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	ehea_post_swqe(pr->qp, swqe);
	pr->tx_packets++;

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}

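/*
 * VLAN hooks: vlan_rx_register clears the whole CB1 VLAN filter when a
 * group is registered and sets every filter bit otherwise; add_vid and
 * kill_vid flip the single filter bit for one VID (64 VIDs per u64
 * filter word).
 */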
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	if (grp)
		memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
	else
		memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	/* Use a 64 bit shift: (1 << (vid & 0x3F)) would be an int shift,
	 * which is undefined for shift counts of 32 and above. */
	cb1->vlan_filter[index] |= 1ULL << (vid & 0x3F);

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	if (port->vgrp)
		port->vgrp->vlan_devices[vid] = NULL;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	/* 64 bit shift, see ehea_vlan_rx_add_vid() */
	cb1->vlan_filter[index] &= ~(1ULL << (vid & 0x3F));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

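/*
 * Step a queue pair through its activation sequence: INITIALIZED, then
 * ENABLED, then ready-to-send, querying the control block before each
 * transition and once more at the end to confirm the result.
 */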
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}

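/*
 * Create the port's event queue and its port resource sets: def_qps
 * full receive/send sets plus add_tx_qps TX-oriented sets whose
 * receive queues are sized to a single entry. On error, already
 * initialized sets are rolled back.
 */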
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

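/*
 * Bring the port up: set up port resources, configure the default QP,
 * register the broadcast MAC, request interrupts, activate all QPs and
 * pre-fill the receive queues. Error paths unwind in reverse order.
 */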
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("ehea_port_res_setup failed. ret:%d", ret);
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		ehea_error("ehea_broadcast_reg_helper failed");
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("ehea_reg_interrupts failed. ret:%d", ret);
		goto out_dereg_bc;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("ehea_fill_port_res failed. ret:%d", ret);
			goto out_free_irqs;
		}
	}

	ret = 0;
	port->state = EHEA_PORT_UP;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_dereg_bc:
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	return ret;
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret)
		netif_start_queue(dev);

	up(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_free_interrupts(dev);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tasklet_kill(&port->port_res[i].send_comp_task);

	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	ret = ehea_clean_all_portres(port);
	port->state = EHEA_PORT_DOWN;
	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_workqueue(port->adapter->ehea_wq);
	down(&port->port_lock);
	netif_stop_queue(dev);
	ret = ehea_down(dev);
	up(&port->port_lock);
	return ret;
}

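/*
 * Reset worker, scheduled by the TX watchdog: quiesce the TX queue and
 * polling, then cycle the port through ehea_down()/ehea_up() under the
 * port lock.
 */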
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	port->resets++;
	down(&port->port_lock);
	netif_stop_queue(dev);
	netif_poll_disable(dev);

	ret = ehea_down(dev);
	if (ret)
		ehea_error("ehea_down failed. not all resources are freed");

	ret = ehea_up(dev);
	if (ret) {
		ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
		goto out;
	}

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	netif_poll_enable(dev);
	netif_wake_queue(dev);
out:
	up(&port->port_lock);
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev))
		queue_work(port->adapter->ehea_wq, &port->reset_task);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->num_ports = cb->num_ports;
	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	kfree(cb);
out:
	return ret;
}

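/*
 * Set up one logical port: read the port number from the device tree,
 * sense the port attributes, try to enable jumbo frames (failure is
 * only logged), fill in the net_device callbacks and register the
 * interface.
 */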
static int ehea_setup_single_port(struct ehea_port *port,
				  struct device_node *dn)
{
	int ret;
	u64 hret;
	struct net_device *dev = port->netdev;
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb4 *cb4;
	u32 *dn_log_port_id;

	sema_init(&port->port_lock, 1);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	if (!dn) {
		ehea_error("bad device node: dn=%p", dn);
		ret = -EINVAL;
		goto out;
	}

	port->of_dev_node = dn;

	/* Determine logical port id */
	dn_log_port_id = (u32 *)get_property(dn, "ibm,hea-port-no", NULL);
	if (!dn_log_port_id) {
		ehea_error("bad device node: dn_log_port_id=%p",
			   dn_log_port_id);
		ret = -EINVAL;
		goto out;
	}
	port->logical_port_id = *dn_log_port_id;

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out;

	/* Enable Jumbo frames */
	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
	} else {
		cb4->jumbo_frame = 1;

		hret = ehea_h_modify_ehea_port(adapter->handle,
					       port->logical_port_id,
					       H_PORT_CB4, H_PORT_CB4_JUMBO,
					       cb4);
		if (hret != H_SUCCESS)
			ehea_info("Jumbo frames not activated");

		kfree(cb4);
	}

	/* initialize net_device structure */
	SET_MODULE_OWNER(dev);

	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->open = ehea_open;
	dev->poll = ehea_poll;
	dev->weight = 64;
	dev->stop = ehea_stop;
	dev->hard_start_xmit = ehea_start_xmit;
	dev->get_stats = ehea_get_stats;
	dev->set_multicast_list = ehea_set_multicast_list;
	dev->set_mac_address = ehea_set_mac_addr;
	dev->change_mtu = ehea_change_mtu;
	dev->vlan_rx_register = ehea_vlan_rx_register;
	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->tx_timeout = &ehea_tx_watchdog;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ehea_set_ethtool_ops(dev);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_free;
	}

	port->netdev = dev;
	ret = 0;
	goto out;

out_free:
	kfree(port->mc_list);
out:
	return ret;
}

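/*
 * Allocate and set up a net_device for each port the adapter reports,
 * walking the "ethernet" device tree nodes. Single-port failures are
 * tolerated; the function only fails if no port comes up at all.
 */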
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	int ret;
	int port_setup_ok = 0;
	struct ehea_port *port;
	struct device_node *dn = NULL;
	struct net_device *dev;
	int i;

	/* get port properties for all ports */
	for (i = 0; i < adapter->num_ports; i++) {

		if (adapter->port[i])
			continue;	/* port already up and running */

		/* allocate memory for the port structures */
		dev = alloc_etherdev(sizeof(struct ehea_port));

		if (!dev) {
			ehea_error("no mem for net_device");
			break;
		}

		port = netdev_priv(dev);
		port->adapter = adapter;
		port->netdev = dev;
		adapter->port[i] = port;

		port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

		dn = of_find_node_by_name(dn, "ethernet");
		ret = ehea_setup_single_port(port, dn);
		if (ret) {
			/* Free mem for this port struct. The others will be
			   processed on rollback */
			free_netdev(dev);
			adapter->port[i] = NULL;
			ehea_error("eHEA port %d setup failed, ret=%d", i, ret);
		}
	}

	of_node_put(dn);

	/* Check for successfully set up ports */
	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i])
			port_setup_ok++;

	if (port_setup_ok)
		ret = 0;	/* At least some ports are set up correctly */
	else
		ret = -EINVAL;

	return ret;
}

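/*
 * ibmebus probe: fetch the adapter handle from the device tree,
 * register the adapter's memory region, create the notification EQ
 * with its tasklet and IRQ, start the reset workqueue and set up the
 * ports.
 */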
static int __devinit ehea_probe(struct ibmebus_dev *dev,
				const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	u64 *adapter_handle;
	int ret;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
		goto out;
	}

	adapter_handle = (u64 *)get_property(dev->ofdev.node, "ibm,hea-handle",
					     NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
			" '%s'\n", dev->ofdev.node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev->ofdev.dev.driver_data = adapter;

	ret = ehea_reg_mr_adapter(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n");
		goto out_free_ad;
	}

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
		goto out_free_res;
	}
	dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports);

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;	/* don't return 0 from a failed probe */
		dev_err(&dev->ofdev.dev, "NEQ creation failed");
		goto out_free_res;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
				  ehea_interrupt_neq, SA_INTERRUPT,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed");
		goto out_kill_eq;
	}

	adapter->ehea_wq = create_workqueue("ehea_wq");
	if (!adapter->ehea_wq) {
		ret = -ENOMEM;	/* don't return 0 from a failed probe */
		goto out_free_irq;
	}

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "setup_ports failed");
		goto out_kill_wq;
	}

	ret = 0;
	goto out;

out_kill_wq:
	destroy_workqueue(adapter->ehea_wq);

out_free_irq:
	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_res:
	ehea_h_free_resource(adapter->handle, adapter->mr.handle);

out_free_ad:
	kfree(adapter);
out:
	return ret;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	unregister_netdev(port->netdev);
	kfree(port->mc_list);
	free_netdev(port->netdev);
}

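/*
 * ibmebus remove: shut down all ports, then release the workqueue,
 * NEQ IRQ, event queue and memory region in reverse probe order.
 */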
static int __devexit ehea_remove(struct ibmebus_dev *dev)
{
	struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
	u64 hret;
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}
	destroy_workqueue(adapter->ehea_wq);

	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

	ehea_destroy_eq(adapter->neq);

	hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle);
	if (hret) {
		dev_err(&dev->ofdev.dev, "free_resource_mr failed");
		return -EIO;
	}
	kfree(adapter);
	return 0;
}

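/*
 * Validate the queue-size module parameters against the driver's
 * allowed ranges before registering with ibmebus.
 */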
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct ibmebus_driver ehea_driver = {
	.name = "ehea",
	.id_table = ehea_device_table,
	.probe = ehea_probe,
	.remove = ehea_remove,
};

int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);

	ret = check_module_parm();
	if (ret)
		goto out;
	ret = ibmebus_register_driver(&ehea_driver);
	if (ret)
		ehea_error("failed registering eHEA device driver on ebus");
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	ibmebus_unregister_driver(&ehea_driver);
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);