enic_main.c

/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
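
/* Worst case for a single send: a TSO skb may carry up to MAX_TSO (64 KB)
 * of data, and each WQ descriptor covers at most WQ_ENET_MAX_DESC_LEN
 * bytes, so one skb can be split across ENIC_DESC_MAX_SPLITS descriptors.
 * The queue stop/wake checks below reserve this much headroom.
 */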
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
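
/* The vnic stats blocks are arrays of u64 counters, so dividing the byte
 * offset by sizeof(u64) yields the index used by the
 * ((u64 *)&vstats->tx)[offset] lookups in enic_get_ethtool_stats().
 */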
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}
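
/* Completion queues are laid out with the rq_count RQ CQs first, followed
 * by the wq_count WQ CQs; the helpers below map queue numbers to CQ
 * indexes and to interrupt resources. In MSI-X mode each CQ carries its
 * own interrupt_offset and two extra vectors follow the per-queue ones
 * (queue errors, then notifications); in legacy INTx mode the three
 * interrupt resources are fixed at 0 (I/O), 1 (error) and 2 (notify).
 */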
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
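
/* The legacy (INTx) ISR below reads a pending-bits field in which bit i
 * is set when interrupt resource i has an event outstanding;
 * ENIC_TEST_INTR() picks out the I/O, error and notify bits.
 */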
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
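
/* For CHECKSUM_PARTIAL skbs, the descriptor carries the offset at which
 * the checksum region starts (hdr_len, from skb_checksum_start_offset())
 * and the offset at which the computed L4 checksum is to be inserted
 * (csum_offset).
 */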
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
			len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
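
/* Pick the send path for an skb: TSO when gso_size is set, hardware L4
 * checksum offload for CHECKSUM_PARTIAL, otherwise a plain (optionally
 * VLAN-tagged) send. When the device has loopback enabled and no VLAN
 * tag is present, frames carry the loopback tag instead.
 */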
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);
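
	/* Stop the queue while fewer than MAX_SKB_FRAGS +
	 * ENIC_DESC_MAX_SPLITS descriptors remain, so the next worst-case
	 * send cannot overrun the ring; enic_wq_service() wakes the queue
	 * once that many descriptors are free again.
	 */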
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way? Trying to minimize the
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}
static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way? Trying to minimize the
	 * calls to add/del unicast addrs. We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (compare_ether_addr(enic->uc_addr[i],
				uc_addr[j]) == 0)
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (compare_ether_addr(uc_addr[i],
				enic->uc_addr[j]) == 0)
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);

	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	/* Ignore the vf argument for now. We can assume the request
	 * is coming on a vf.
	 */
	if (is_valid_ether_addr(mac)) {
		memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
		return 0;
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	int err = 0, restore_pp = 1;

	/* don't support VFs, yet */
	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
	memset(&enic->pp, 0, sizeof(enic->pp));

	enic->pp.set |= ENIC_SET_REQUEST;
	enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		enic->pp.set |= ENIC_SET_NAME;
		memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		enic->pp.set |= ENIC_SET_INSTANCE;
		memcpy(enic->pp.instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		enic->pp.set |= ENIC_SET_HOST;
		memcpy(enic->pp.host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	/* Special case handling: mac came from IFLA_VF_MAC */
	if (!is_zero_ether_addr(prev_pp.vf_mac))
		memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);

	if (is_zero_ether_addr(netdev->dev_addr))
		random_ether_addr(netdev->dev_addr);

	err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
		} else {
			memset(&enic->pp, 0, sizeof(enic->pp));
			memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
			memset(enic->pp.mac_addr, 0, ETH_ALEN);
			memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	memset(enic->pp.vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	int err;

	if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, enic->pp.request, &response);
	if (err)
		return err;

	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
	if (enic->pp.set & ENIC_SET_NAME)
		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
			enic->pp.name);
	if (enic->pp.set & ENIC_SET_INSTANCE)
		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
			enic->pp.instance_uuid);
	if (enic->pp.set & ENIC_SET_HOST)
		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
			enic->pp.host_uuid);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (vlan_stripped) {
			if (netdev->features & NETIF_F_GRO)
				vlan_gro_receive(&enic->napi[q_number],
					enic->vlan_group, vlan_tci, skb);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan_tci);
		} else {
			if (netdev->features & NETIF_F_GRO)
				napi_gro_receive(&enic->napi[q_number], skb);
			else
				netif_receive_skb(skb);
		}
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}
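
/* In MSI mode there is no dedicated notify interrupt, so notification
 * events (link, MTU and msglvl changes) are polled from a periodic
 * timer; INTx and MSI-X deliver them through an interrupt instead.
 */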
  1286. static void enic_notify_timer_start(struct enic *enic)
  1287. {
  1288. switch (vnic_dev_get_intr_mode(enic->vdev)) {
  1289. case VNIC_DEV_INTR_MODE_MSI:
  1290. mod_timer(&enic->notify_timer, jiffies);
  1291. break;
  1292. default:
  1293. /* Using intr for notification for INTx/MSI-X */
  1294. break;
  1295. }
  1296. }

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_add_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_add_station_addr(enic);
	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
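
/* Teardown runs in the reverse order of enic_open(): mask and flush
 * interrupts first so no handler can race the queue teardown, then
 * disable the queues and clean the rings.
 */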

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_del_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
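
/* MTU change from user space: reject out-of-range values, and bounce the
 * interface if it is running so the receive rings are refilled with
 * buffers sized for the new MTU.
 */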

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
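
/* MTU change driven by the device: the new MTU is read back from the vNIC
 * and clamped to the supported range, then RQ 0 is drained and refilled
 * with buffers of the new size. Only rq[0]/cq[0]/intr[0] are rebuilt, so
 * this path assumes a single-RQ configuration.
 */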

static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}
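
/* netpoll entry point: interrupts may not be available, so invoke the
 * ISRs directly for whichever interrupt mode the device is using.
 */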

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector,
				enic);
		}
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
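
/* Helper for devcmds that complete asynchronously: kick off the operation
 * with start() and poll finished() every 100 ms, giving up after two
 * seconds. Sleeps, so process context only.
 */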

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete, 2 seconds max */

	time = jiffies + (HZ * 2);
	do {
		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
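
/* Program the device's 40-byte RSS hash key. The byte values below are
 * just the ASCII strings "UCSawesome", "PALOunique", "LINUXrocks" and
 * "ENICiscool". The key is staged in a DMA-coherent buffer because the
 * device reads it directly.
 */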

static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
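
/* Program the RSS indirection table: 2^rss_hash_bits entries are spread
 * round-robin across the enabled receive queues.
 */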

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		rss_cpu_buf_va->cpu[i / 4].b[i % 4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
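
/* Enable RSS only when the device advertises it and more than one RQ is
 * configured. If programming the key or the indirection table fails, fall
 * back to non-RSS operation rather than failing the configuration.
 */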

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.\n");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
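
/* Reset work item: quiesce the interface, hard-reset the vNIC, reprogram
 * its configuration, and bring it back up, all under the rtnl lock.
 */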

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_multicast_list	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_register	= enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_multicast_list	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_register	= enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};
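
/* enic_dev_init()/enic_dev_deinit() bracket the vNIC-level setup: read
 * the device configuration, size the resources, pick an interrupt mode,
 * and register one NAPI context per RQ (a single context outside of
 * MSI-X mode).
 */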

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
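
/* Probe order: PCI bring-up, BAR mapping, vNIC registration and open,
 * devcmd setup, vNIC init, then netdev setup and registration. Each error
 * label below unwinds exactly the steps that succeeded before it.
 */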

static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_vnic_unregister;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	/* Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
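
/* Remove mirrors probe in reverse. The work items are cancelled first so
 * enic_reset() and enic_change_mtu_work() cannot run against a device
 * that is being torn down.
 */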

static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);