ixgbe_main.c

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.1.18"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
        "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;
        struct net_device *netdev = adapter->netdev;
        return netdev->name;
}
#endif

static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
        u32 ivar, index;

        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = (int_alloc_entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
        ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
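
/*
 * Worked example (added for clarity, derived from the code above): each
 * 32-bit IVAR register packs four 8-bit interrupt-allocation entries, so
 * int_alloc_entry selects register (entry >> 2) and byte lane (entry & 0x3).
 * For entry 5, the vector (OR'd with IXGBE_IVAR_ALLOC_VAL to mark it valid)
 * is written into byte 1 of IXGBE_IVAR(1).
 */
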
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                pci_unmap_page(adapter->pdev,
                               tx_buffer_info->dma,
                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop,
                                       union ixgbe_adv_tx_desc *eop_desc)
{
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].dma &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  TDH                  <%x>\n"
                        "  TDT                  <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  next_to_watch        <%x>\n"
                        "  jiffies              <%lx>\n"
                        "  next_to_watch.status <%x>\n",
                        readl(adapter->hw.hw_addr + tx_ring->head),
                        readl(adapter->hw.hw_addr + tx_ring->tail),
                        tx_ring->next_to_use,
                        tx_ring->next_to_clean,
                        tx_ring->tx_buffer_info[eop].time_stamp,
                        eop, jiffies, eop_desc->wb.status);
                return true;
        }

        return false;
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop;
        bool cleaned = false;
        int count = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
                for (cleaned = false; !cleaned;) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);

                        tx_ring->stats.bytes += tx_buffer_info->length;
                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);
                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                tx_ring->stats.packets++;
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

                /* weight of a sort for tx, avoid endless transmit cleanup */
                if (count++ >= tx_ring->work_limit)
                        break;
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
        spin_lock(&tx_ring->tx_lock);

        if (cleaned && netif_carrier_ok(netdev) &&
            (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
            !test_bit(__IXGBE_DOWN, &adapter->state))
                netif_wake_queue(netdev);

        spin_unlock(&tx_ring->tx_lock);

        if (adapter->detect_tx_hung)
                if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
                        netif_stop_queue(netdev);

        if (count >= tx_ring->work_limit)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

        return cleaned;
}
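
/*
 * Note (added for clarity): cleanup is driven by next_to_watch.  When a
 * frame is queued, the index of its last descriptor is recorded in the
 * first buffer's next_to_watch; once hardware sets DD in that descriptor's
 * write-back status, the inner loop above reclaims every descriptor from
 * next_to_clean through eop, then moves on to the next frame, bounded by
 * tx_ring->work_limit per invocation.
 */
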
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @is_vlan: packet has a VLAN tag
 * @tag: VLAN tag from descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, bool is_vlan,
                              u16 tag)
{
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan)
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
                else
                        netif_receive_skb(skb);
        } else {
                if (adapter->vlgrp && is_vlan)
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
        }
}
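
/*
 * Note (added for clarity): the split above exists because this can run in
 * two contexts.  On the normal NAPI path the NAPI-safe receive calls
 * (netif_receive_skb()/vlan_hwaccel_receive_skb()) are used; when invoked
 * via netpoll (IXGBE_FLAG_IN_NETPOLL) the interrupt-safe netif_rx() and
 * vlan_hwaccel_rx() variants are used instead.
 */
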
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err,
                                     struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Ignore Checksum bit is set */
        if ((status_err & IXGBE_RXD_STAT_IXSM) ||
            !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* TCP/UDP checksum error bit is set */
        if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}
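
/*
 * Summary of the checksum decision above (added for clarity):
 *   IXSM set or Rx csum offload disabled -> CHECKSUM_NONE, stack verifies;
 *   TCPE/IPE error bits set              -> CHECKSUM_NONE, error counted;
 *   L4CS/UDPCS valid                     -> CHECKSUM_UNNECESSARY.
 */
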
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!rx_buffer_info->page &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        rx_buffer_info->page = alloc_page(GFP_ATOMIC);
                        if (!rx_buffer_info->page) {
                                adapter->alloc_rx_page_failed++;
                                goto no_buffers;
                        }
                        rx_buffer_info->page_dma =
                            pci_map_page(pdev, rx_buffer_info->page,
                                         0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
                }

                if (!rx_buffer_info->skb) {
                        skb = netdev_alloc_skb(netdev, bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        rx_buffer_info->skb = skb;
                        rx_buffer_info->dma = pci_map_single(pdev, skb->data,
                                                             bufsz,
                                                             PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr =
                            cpu_to_le64(rx_buffer_info->page_dma);
                        rx_desc->read.hdr_addr =
                            cpu_to_le64(rx_buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr =
                            cpu_to_le64(rx_buffer_info->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
        }
no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}
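
/*
 * Note (added for clarity): next_to_use is left pointing at the first slot
 * that has not been refilled, while the tail register must point at the
 * last descriptor hardware may consume.  Hence the "if (i-- == 0)" step
 * above: it backs i up by one (wrapping at zero) before the writel(), and
 * the wmb() orders the descriptor writes ahead of the doorbell write on
 * weakly-ordered architectures.
 */
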
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 upper_len, len, staterr;
        u16 hdr_info, vlan_tag;
        bool is_vlan, cleaned = false;
        int cleaned_count = 0;

        i = rx_ring->next_to_clean;
        upper_len = 0;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];
        is_vlan = (staterr & IXGBE_RXD_STAT_VP);
        vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        while (staterr & IXGBE_RXD_STAT_DD) {
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info =
                            le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
                        len =
                            ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                             IXGBE_RXDADV_HDRBUFLEN_SHIFT);
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else
                        len = le16_to_cpu(rx_desc->wb.upper.length);

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         adapter->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page, 0, upper_len);
                        rx_buffer_info->page = NULL;

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_buffer = &rx_ring->rx_buffer_info[i];
                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);

                cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, staterr, skb);
                skb->protocol = eth_type_trans(skb, netdev);
                ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
                netdev->last_rx = jiffies;

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = next_buffer;
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
                is_vlan = (staterr & IXGBE_RXD_STAT_VP);
                vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        return cleaned;
}

#define IXGBE_MAX_INTR 10
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        int i, vector = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
                               IXGBE_MSIX_VECTOR(vector));
                writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
                       adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
                vector++;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
                               IXGBE_MSIX_VECTOR(vector));
                writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
                       adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
                vector++;
        }

        vector = adapter->num_tx_queues + adapter->num_rx_queues;
        ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                       IXGBE_MSIX_VECTOR(vector));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
}
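
/*
 * Note (added for clarity): the resulting MSI-X vector layout is
 *   vectors 0 .. num_tx_queues-1              -> one per Tx queue,
 *   the next num_rx_queues vectors            -> one per Rx queue,
 *   the final vector                          -> "other causes" (LSC etc.).
 * The raw EITR value 1950 written for that last vector presumably throttles
 * the slow-path interrupt; the exact interval depends on the hardware's
 * EITR encoding (assumption, not stated in this source).
 */
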
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

        if (eicr & IXGBE_EICR_LSC) {
                adapter->lsc_int++;
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies);
        }

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
        struct ixgbe_ring *txr = data;
        struct ixgbe_adapter *adapter = txr->adapter;

        ixgbe_clean_tx_irq(adapter, txr);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
        struct ixgbe_ring *rxr = data;
        struct ixgbe_adapter *adapter = rxr->adapter;

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
        netif_rx_schedule(adapter->netdev, &adapter->napi);
        return IRQ_HANDLED;
}

static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbe_adapter *adapter = container_of(napi,
                                        struct ixgbe_adapter, napi);
        struct net_device *netdev = adapter->netdev;
        int work_done = 0;
        struct ixgbe_ring *rxr = adapter->rx_ring;

        /* Keep link state information with original netdev */
        if (!netif_carrier_ok(netdev))
                goto quit_polling;

        ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);

        /* If no Tx and not enough Rx work done, exit the polling mode */
        if ((work_done < budget) || !netif_running(netdev)) {
quit_polling:
                netif_rx_complete(netdev, napi);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
                                        rxr->eims_value);
        }

        return work_done;
}

/**
 * ixgbe_setup_msix - Initialize MSI-X interrupts
 *
 * ixgbe_setup_msix allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i, int_vector = 0, err = 0;
        int max_msix_count;

        /* +1 for the LSC interrupt */
        max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
        adapter->msix_entries = kcalloc(max_msix_count,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries)
                return -ENOMEM;

        for (i = 0; i < max_msix_count; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                              max_msix_count);
        if (err)
                goto out;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
                err = request_irq(adapter->msix_entries[int_vector].vector,
                                  &ixgbe_msix_clean_tx,
                                  0,
                                  adapter->tx_ring[i].name,
                                  &(adapter->tx_ring[i]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto release_irqs;
                }
                adapter->tx_ring[i].eims_value =
                    (1 << IXGBE_MSIX_VECTOR(int_vector));
                adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
                int_vector++;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                if (strlen(netdev->name) < (IFNAMSIZ - 5))
                        sprintf(adapter->rx_ring[i].name,
                                "%s-rx%d", netdev->name, i);
                else
                        memcpy(adapter->rx_ring[i].name,
                               netdev->name, IFNAMSIZ);
                err = request_irq(adapter->msix_entries[int_vector].vector,
                                  &ixgbe_msix_clean_rx, 0,
                                  adapter->rx_ring[i].name,
                                  &(adapter->rx_ring[i]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto release_irqs;
                }

                adapter->rx_ring[i].eims_value =
                    (1 << IXGBE_MSIX_VECTOR(int_vector));
                adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
                int_vector++;
        }

        sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
        err = request_irq(adapter->msix_entries[int_vector].vector,
                          &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
                goto release_irqs;
        }

        /* FIXME: implement netif_napi_remove() instead */
        adapter->napi.poll = ixgbe_clean_rxonly;
        adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
        return 0;

release_irqs:
        int_vector--;
        for (; int_vector >= adapter->num_tx_queues; int_vector--)
                free_irq(adapter->msix_entries[int_vector].vector,
                         &(adapter->rx_ring[int_vector -
                                            adapter->num_tx_queues]));

        for (; int_vector >= 0; int_vector--)
                free_irq(adapter->msix_entries[int_vector].vector,
                         &(adapter->tx_ring[int_vector]));
out:
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        return err;
}
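
/*
 * Note (added for clarity): on failure, release_irqs unwinds in reverse.
 * int_vector is first decremented past the vector whose request_irq()
 * failed, Rx vectors (indices >= num_tx_queues) are freed against their
 * rx_ring, then the remaining Tx vectors against their tx_ring, and
 * finally the msix_entries array is freed and the MSI-X flag cleared so
 * the caller can fall back to MSI or legacy interrupts.
 */
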
/**
 * ixgbe_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr)
                return IRQ_NONE; /* Not our interrupt */

        if (eicr & IXGBE_EICR_LSC) {
                adapter->lsc_int++;
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies);
        }
        if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
                /* Disable interrupts and register for poll.  The flush of the
                 * posted write is intentionally left out. */
                atomic_inc(&adapter->irq_sem);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
                __netif_rx_schedule(netdev, &adapter->napi);
        }

        return IRQ_HANDLED;
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
{
        struct net_device *netdev = adapter->netdev;
        int flags, err;
        irq_handler_t handler = ixgbe_intr;

        flags = IRQF_SHARED;

        err = ixgbe_setup_msix(adapter);
        if (!err)
                goto request_done;

        /*
         * if we can't do MSI-X, fall through and try MSI
         * No need to reallocate memory since we're decreasing the number of
         * queues. We just won't use the other ones, also it is freed correctly
         * on ixgbe_remove.
         */
        *num_rx_queues = 1;

        /* do MSI */
        err = pci_enable_msi(adapter->pdev);
        if (!err) {
                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
                flags &= ~IRQF_SHARED;
                handler = &ixgbe_intr;
        }

        err = request_irq(adapter->pdev->irq, handler, flags,
                          netdev->name, netdev);
        if (err)
                DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

request_done:
        return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i;

                for (i = 0; i < adapter->num_tx_queues; i++)
                        free_irq(adapter->msix_entries[i].vector,
                                 &(adapter->tx_ring[i]));
                for (i = 0; i < adapter->num_rx_queues; i++)
                        free_irq(adapter->msix_entries[i +
                                 adapter->num_tx_queues].vector,
                                 &(adapter->rx_ring[i]));
                i = adapter->num_rx_queues + adapter->num_tx_queues;
                free_irq(adapter->msix_entries[i].vector, netdev);
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
                return;
        }

        free_irq(adapter->pdev->irq, netdev);
        if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
        }
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
        atomic_inc(&adapter->irq_sem);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
        if (atomic_dec_and_test(&adapter->irq_sem)) {
                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
                                        (IXGBE_EIMS_ENABLE_MASK &
                                         ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
                                IXGBE_EIMS_ENABLE_MASK);
                IXGBE_WRITE_FLUSH(&adapter->hw);
        }
}
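
/*
 * Note (added for clarity): irq_sem makes disable/enable nest.  Every
 * ixgbe_irq_disable() (and the inline EIMC write in ixgbe_intr()) bumps the
 * counter; ixgbe_irq_enable() only re-arms EIMS once the counter drops back
 * to zero, so paired calls from different paths cannot re-enable interrupts
 * prematurely.
 */
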
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
        int i;
        struct ixgbe_hw *hw = &adapter->hw;

        if (adapter->rx_eitr)
                IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
                                EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

        /* for re-triggering the interrupt in non-NAPI mode */
        adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
        adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));

        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
}
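
/*
 * Note (added for clarity): without MSI-X there is effectively a single
 * interrupt, so rx_ring[0] and tx_ring[0] share eims_value bit 0 and all
 * causes funnel into the one handler, ixgbe_intr().
 */
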
/**
 * ixgbe_configure_tx - Configure 82598 Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
        u64 tdba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, tdlen;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                tdba = adapter->tx_ring[i].dma;
                tdlen = adapter->tx_ring[i].count *
                    sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
                adapter->tx_ring[i].head = IXGBE_TDH(i);
                adapter->tx_ring[i].tail = IXGBE_TDT(i);
        }

        IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
}

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
                           (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/**
 * ixgbe_configure_rx - Configure 82598 Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        u32 rdlen, rxctrl, rxcsum;
        u32 random[10];
        u32 reta, mrqc;
        int i;
        u32 fctrl, hlreg0;
        u32 srrctl;
        u32 pages;

        /* Decide whether to use packet split mode or not */
        if (netdev->mtu > ETH_DATA_LEN)
                adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
        else
                adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
        } else {
                if (netdev->mtu <= ETH_DATA_LEN)
                        adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        adapter->rx_buf_len = ALIGN(max_frame, 1024);
        }

        fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (adapter->netdev->mtu <= ETH_DATA_LEN)
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        else
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

        pages = PAGE_USE_COUNT(adapter->netdev->mtu);

        srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                srrctl |= ((IXGBE_RX_HDR_SIZE <<
                            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                           IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
                        srrctl |=
                            IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                else
                        srrctl |=
                            adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
                adapter->rx_ring[i].head = IXGBE_RDH(i);
                adapter->rx_ring[i].tail = IXGBE_RDT(i);
        }

        if (adapter->num_rx_queues > 1) {
                /* Random 40bytes used as random key in RSS hash function */
                get_random_bytes(&random[0], 40);

                switch (adapter->num_rx_queues) {
                case 8:
                case 4:
                        /* Bits [3:0] in each byte refer to the Rx queue number */
                        reta = 0x00010203;
                        break;
                case 2:
                        reta = 0x00010001;
                        break;
                default:
                        reta = 0x00000000;
                        break;
                }

                /* Fill out redirection table */
                for (i = 0; i < 32; i++) {
                        IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
                        if (adapter->num_rx_queues > 4) {
                                i++;
                                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
                                                      0x04050607);
                        }
                }

                /* Fill out hash function seeds */
                for (i = 0; i < 10; i++)
                        IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);

                mrqc = IXGBE_MRQC_RSSEN
                    /* Perform hash on these packet types */
                    | IXGBE_MRQC_RSS_FIELD_IPV4
                    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
                    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
                    | IXGBE_MRQC_RSS_FIELD_IPV6
                    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
                    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
                IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

                /* Multiqueue and packet checksumming are mutually exclusive. */
                rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
                rxcsum |= IXGBE_RXCSUM_PCSD;
                IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
        } else {
                /* Enable Receive Checksum Offload for TCP and UDP */
                rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
                if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
                        /* Enable IPv4 payload checksum for UDP fragments
                         * Must be used in conjunction with packet-split. */
                        rxcsum |= IXGBE_RXCSUM_IPPCSE;
                } else {
                        /* don't need to clear IPPCSE as it defaults to 0 */
                }
                IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
        }

        /* Enable Receives */
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
}
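
/*
 * Note (added for clarity): the 128-entry RSS redirection table is written
 * as 32 registers of four byte-wide entries each, so storing 0x00010203 in
 * every register spreads flows across queues 0-3; with more than four
 * queues every other register instead gets 0x04050607, yielding an
 * eight-queue pattern.
 */
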
static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;

        ixgbe_irq_disable(adapter);
        adapter->vlgrp = grp;

        if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
                ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
                ctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
        }

        ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        /* add VID to filter table */
        ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        ixgbe_irq_disable(adapter);
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
        ixgbe_irq_enable(adapter);

        /* remove VID from filter table */
        ixgbe_set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
        ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}

/**
 * ixgbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void ixgbe_set_multi(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
        u8 *mta_list;
        u32 fctrl;
        int i;

        /* Check for Promiscuous and All Multicast modes */

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

        if (netdev->flags & IFF_PROMISC) {
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        } else if (netdev->flags & IFF_ALLMULTI) {
                fctrl |= IXGBE_FCTRL_MPE;
                fctrl &= ~IXGBE_FCTRL_UPE;
        } else {
                fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        }

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        if (netdev->mc_count) {
                mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
                if (!mta_list)
                        return;

                /* Shared function expects packed array of only addresses. */
                mc_ptr = netdev->mc_list;

                for (i = 0; i < netdev->mc_count; i++) {
                        if (!mc_ptr)
                                break;
                        memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
                               ETH_ALEN);
                        mc_ptr = mc_ptr->next;
                }

                ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
                kfree(mta_list);
        } else {
                ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
        }
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        ixgbe_set_multi(netdev);

        ixgbe_restore_vlan(adapter);

        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
                                       (adapter->rx_ring[i].count - 1));
}

static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;
        u32 gpie = 0;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 txdctl, rxdctl, mhadd;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

        if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
                              IXGBE_FLAG_MSI_ENABLED)) {
                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                        gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
                                IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
                } else {
                        /* MSI only */
                        gpie = (IXGBE_GPIE_EIAME |
                                IXGBE_GPIE_PBA_SUPPORT);
                }
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
                gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
        }

        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
        }

        for (i = 0; i < adapter->num_tx_queues; i++) {
                txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
        }
        /* enable all receives */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                ixgbe_configure_msix(adapter);
        else
                ixgbe_configure_msi_and_legacy(adapter);

        clear_bit(__IXGBE_DOWN, &adapter->state);
        napi_enable(&adapter->napi);
        ixgbe_irq_enable(adapter);

        /* bring the link up in the watchdog, this could race with our first
         * link up interrupt but shouldn't be a problem */
        mod_timer(&adapter->watchdog_timer, jiffies);
        return 0;
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
        /* hardware has been reset, we need to reload some things */
        ixgbe_configure(adapter);

        return ixgbe_up_complete(adapter);
}

void ixgbe_reset(struct ixgbe_adapter *adapter)
{
        if (ixgbe_init_hw(&adapter->hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 err, num_rx_queues = adapter->num_rx_queues;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
                       "suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (netif_running(netdev)) {
                err = ixgbe_request_irq(adapter, &num_rx_queues);
                if (err)
                        return err;
        }

        ixgbe_reset(adapter);

        if (netif_running(netdev))
                ixgbe_up(adapter);

        netif_device_attach(netdev);

        return 0;
}
#endif

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbe_rx_buffer *rx_buffer_info;

                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         adapter->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                }
                if (rx_buffer_info->skb) {
                        dev_kfree_skb(rx_buffer_info->skb);
                        rx_buffer_info->skb = NULL;
                }
                if (!rx_buffer_info->page)
                        continue;
                pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
                rx_buffer_info->page_dma = 0;

                put_page(rx_buffer_info->page);
                rx_buffer_info->page = NULL;
        }

        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        writel(0, adapter->hw.hw_addr + rx_ring->head);
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
        }

        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        writel(0, adapter->hw.hw_addr + tx_ring->head);
        writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

void ixgbe_down(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 rxctrl;

        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);

        /* disable receives */
        rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
                        rxctrl & ~IXGBE_RXCTRL_RXEN);

        netif_tx_disable(netdev);

        /* disable transmits in the hardware */

        /* flush both disables */
        IXGBE_WRITE_FLUSH(&adapter->hw);
        msleep(10);

        napi_disable(&adapter->napi);
        atomic_set(&adapter->irq_sem, 0);

        ixgbe_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        ixgbe_reset(adapter);
        ixgbe_clean_all_tx_rings(adapter);
        ixgbe_clean_all_rx_rings(adapter);
}
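
/*
 * Note (added for clarity): ordering in ixgbe_down() matters.  __IXGBE_DOWN
 * is set first so the ISR and watchdog stop re-arming work, receives are
 * disabled and flushed before NAPI is stopped, interrupts are masked and
 * the watchdog timer synchronously deleted, and only then is the hardware
 * reset and the rings reclaimed.
 */
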
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                ixgbe_down(adapter);
                ixgbe_free_irq(adapter);
        }

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        pci_disable_device(pdev);

        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

static void ixgbe_shutdown(struct pci_dev *pdev)
{
        ixgbe_suspend(pdev, PMSG_SUSPEND);
}

/**
 * ixgbe_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int ixgbe_clean(struct napi_struct *napi, int budget)
{
        struct ixgbe_adapter *adapter = container_of(napi,
                                        struct ixgbe_adapter, napi);
        struct net_device *netdev = adapter->netdev;
        int tx_cleaned = 0, work_done = 0;

        /* In non-MSIX case, there is no multi-Tx/Rx queue */
        tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
        ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
                           budget);

        if (tx_cleaned)
                work_done = budget;

        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(netdev, napi);
                ixgbe_irq_enable(adapter);
        }

        return work_done;
}
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_down(adapter);
	ixgbe_up(adapter);
}

/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].adapter = adapter;
		adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
	}

	return 0;
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;

	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
	if (hw->mac.ops.reset(hw)) {
		dev_err(&pdev->dev, "HW Init failed\n");
		return -EIO;
	}
	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
					 false)) {
		dev_err(&pdev->dev, "Link Speed setup failed\n");
		return -EIO;
	}

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* Set the default values */
	adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
	adapter->num_tx_queues = 1;
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	if (ixgbe_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	atomic_set(&adapter->irq_sem, 1);
	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
	txdr->tx_buffer_info = vmalloc(size);
	if (!txdr->tx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		vfree(txdr->tx_buffer_info);
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the tx desc ring\n");
		return -ENOMEM;
	}

	txdr->adapter = adapter;
	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	txdr->work_limit = txdr->count;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}

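/*
 * Note the allocation split above: tx_buffer_info is host-only
 * bookkeeping, so plain vmalloc() is fine, while the descriptor ring
 * itself is shared with the hardware and therefore comes from
 * pci_alloc_consistent() so the device and CPU agree on its contents
 * without explicit cache flushes. The Rx path below uses the same split.
 */
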
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
	rxdr->rx_buffer_info = vmalloc(size);
	if (!rxdr->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"vmalloc allocation failed for the rx desc ring\n");
		return -ENOMEM;
	}
	memset(rxdr->rx_buffer_info, 0, size);

	desc_len = sizeof(union ixgbe_adv_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the rx desc ring\n");
		vfree(rxdr->rx_buffer_info);
		return -ENOMEM;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->adapter = adapter;

	return 0;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

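/*
 * For reference, max_frame below is the on-wire frame size: an MTU of
 * 1500 gives 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes, the
 * classic Ethernet maximum, while the upper bound is the hardware's
 * jumbo limit, IXGBE_MAX_JUMBO_FRAME_SIZE.
 */
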
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_up(adapter);
	}

	return 0;
}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;
	u32 ctrl_ext;
	u32 num_rx_queues = adapter->num_rx_queues;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);

try_intr_reinit:
	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		num_rx_queues = 1;
		adapter->num_rx_queues = num_rx_queues;
	}

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter, &num_rx_queues);
	if (err)
		goto err_req_irq;

	/* ixgbe_request_irq might have reduced num_rx_queues */
	if (num_rx_queues < adapter->num_rx_queues) {
		/* We didn't get MSI-X, so we need to release everything,
		 * set our Rx queue count to num_rx_queues, and redo the
		 * whole init process.
		 */
		ixgbe_free_irq(adapter);
		if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		}
		ixgbe_free_all_rx_resources(adapter);
		ixgbe_free_all_tx_resources(adapter);
		adapter->num_rx_queues = num_rx_queues;

		/* Reset the hardware, and start over. */
		ixgbe_reset(adapter);

		goto try_intr_reinit;
	}

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl_ext;

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);

	return 0;
}

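/*
 * The += accumulation in ixgbe_update_stats() below relies on the
 * hardware statistics registers being clear-on-read: each
 * IXGBE_READ_REG() returns the count since the previous read, and the
 * driver keeps the running totals in adapter->stats.
 */
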
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 good_rx, missed_rx, bprc;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
	missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
	adapter->stats.gprc += (good_rx - missed_rx);
	adapter->stats.mpc[0] += missed_rx;
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprc;
	adapter->net_stats.tx_packets = adapter->stats.gptc;
	adapter->net_stats.rx_bytes = adapter->stats.gorc;
	adapter->net_stats.tx_bytes = adapter->stats.gotc;
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
				       adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
}

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	bool link_up;
	u32 link_speed = 0;

	adapter->hw.mac.ops.check_link(&adapter->hw, &link_speed, &link_up);

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
				"Flow Control: %s\n",
				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				 "10 Gbps" :
				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				  "1 Gbps" : "unknown speed")),
				((FLOW_RX && FLOW_TX) ? "RX/TX" :
				 (FLOW_RX ? "RX" :
				  (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	ixgbe_update_stats(adapter);

	/* Reset the timer */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			  (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

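/*
 * Worked example of the worst-case math above: each data descriptor can
 * carry at most IXGBE_MAX_DATA_PER_TXD = 16384 bytes, so a 60000-byte
 * linear area needs TXD_USE_COUNT(60000) = (60000 >> 14) + 1 = 3 + 1 = 4
 * descriptors. Assuming 4 KiB pages, DESC_NEEDED works out to
 * 1 (for the linear part) + MAX_SKB_FRAGS * 1 (one per page-sized
 * fragment) + 1 (for the context descriptor).
 */
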
static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, l4len;
	*hdr_len = 0;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx |=
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

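/*
 * Both ixgbe_tso() above and ixgbe_tx_csum() below work the same way:
 * they consume one ring slot for an "advanced context descriptor" that
 * tells the hardware how to parse and offload the frames described by
 * the data descriptors that follow it.
 */
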
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->protocol == htons(ETH_P_IP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;

			if (skb->sk->sk_protocol == IPPROTO_TCP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size,
							   PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}

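/*
 * Note the division of labor: ixgbe_tx_map() above only DMA-maps the
 * skb and records the mappings in tx_buffer_info[]; ixgbe_tx_queue()
 * below turns those entries into hardware descriptors and, only after a
 * write barrier, moves the tail pointer so the device starts fetching.
 */
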
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

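/*
 * In ixgbe_xmit_frame() below, the descriptor budget is computed up
 * front (one context slot for TSO or checksum offload plus
 * TXD_USE_COUNT() per linear/fragment chunk), and the queue is stopped
 * under tx_lock whenever fewer than DESC_NEEDED slots remain, so that a
 * worst-case frame is always guaranteed to fit on the next send.
 */
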
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int len = skb->len;
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags = 0;
	u8 hdr_len;
	int tso;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

	len -= skb->data_len;

	tx_ring = adapter->tx_ring;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	mss = skb_shinfo(skb)->gso_size;

	if (mss)
		count++;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		count++;

	count += TXD_USE_COUNT(len);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	spin_lock_irqsave(&tx_ring->tx_lock, flags);
	if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
		adapter->tx_busy++;
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
		       ixgbe_tx_map(adapter, tx_ring, skb, first),
		       skb->len, hdr_len);

	netdev->trans_start = jiffies;

	spin_lock_irqsave(&tx_ring->tx_lock, flags);
	/* Make sure there is space in the ring for the next send. */
	if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif

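/*
 * ixgbe_probe() below follows the usual PCI probe pattern: each acquired
 * resource has a matching error label, and the labels unwind in the
 * reverse order of acquisition, so a failure at any step releases
 * exactly what has been set up so far.
 */
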
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_multicast_list = &ixgbe_set_multi;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);

	/* initialize default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;

	/* Interrupt Throttle Rate */
	adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
	adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);

	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
		 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
		  (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
		  "Unknown"),
		 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
		  "Unknown"),
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		 hw->mac.type, hw->phy.type,
		 (part_num >> 8), (part_num & 0xff));

	/* reset the hardware with the new settings */
	ixgbe_start_hw(hw);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_hw_init:
err_sw_init:
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	unregister_netdev(netdev);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

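/*
 * The three callbacks below implement the standard PCI error recovery
 * flow: the core reports an error via error_detected(), resets the slot
 * and asks the driver to reinitialize via slot_reset(), then tells it
 * traffic may restart via resume().
 */
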
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
	pci_unregister_driver(&ixgbe_driver);
}

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */