ixgb_main.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314
  1. /*******************************************************************************
  2. Intel PRO/10GbE Linux driver
  3. Copyright(c) 1999 - 2006 Intel Corporation.
  4. This program is free software; you can redistribute it and/or modify it
  5. under the terms and conditions of the GNU General Public License,
  6. version 2, as published by the Free Software Foundation.
  7. This program is distributed in the hope it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc.,
  13. 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  14. The full GNU General Public License is included in this distribution in
  15. the file called "COPYING".
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20. *******************************************************************************/
  21. #include "ixgb.h"
  22. char ixgb_driver_name[] = "ixgb";
  23. static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
  24. #ifndef CONFIG_IXGB_NAPI
  25. #define DRIVERNAPI
  26. #else
  27. #define DRIVERNAPI "-NAPI"
  28. #endif
  29. #define DRV_VERSION "1.0.117-k2"DRIVERNAPI
  30. char ixgb_driver_version[] = DRV_VERSION;
  31. static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
	/* all four 82597EX variants (plain, CX4, SR, LR) are driven the
	 * same way; subvendor/subdevice are wildcarded for each */
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
  53. /* Local Function Prototypes */
  54. int ixgb_up(struct ixgb_adapter *adapter);
  55. void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
  56. void ixgb_reset(struct ixgb_adapter *adapter);
  57. int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
  58. int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
  59. void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
  60. void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
  61. void ixgb_update_stats(struct ixgb_adapter *adapter);
  62. static int ixgb_init_module(void);
  63. static void ixgb_exit_module(void);
  64. static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  65. static void __devexit ixgb_remove(struct pci_dev *pdev);
  66. static int ixgb_sw_init(struct ixgb_adapter *adapter);
  67. static int ixgb_open(struct net_device *netdev);
  68. static int ixgb_close(struct net_device *netdev);
  69. static void ixgb_configure_tx(struct ixgb_adapter *adapter);
  70. static void ixgb_configure_rx(struct ixgb_adapter *adapter);
  71. static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
  72. static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
  73. static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
  74. static void ixgb_set_multi(struct net_device *netdev);
  75. static void ixgb_watchdog(unsigned long data);
  76. static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
  77. static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
  78. static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
  79. static int ixgb_set_mac(struct net_device *netdev, void *p);
  80. static irqreturn_t ixgb_intr(int irq, void *data);
  81. static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
  82. #ifdef CONFIG_IXGB_NAPI
  83. static int ixgb_clean(struct net_device *netdev, int *budget);
  84. static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
  85. int *work_done, int work_to_do);
  86. #else
  87. static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
  88. #endif
  89. static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
  90. void ixgb_set_ethtool_ops(struct net_device *netdev);
  91. static void ixgb_tx_timeout(struct net_device *dev);
  92. static void ixgb_tx_timeout_task(struct net_device *dev);
  93. static void ixgb_vlan_rx_register(struct net_device *netdev,
  94. struct vlan_group *grp);
  95. static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
  96. static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
  97. static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
  98. #ifdef CONFIG_NET_POLL_CONTROLLER
  99. /* for netdump / net console */
  100. static void ixgb_netpoll(struct net_device *dev);
  101. #endif
  102. static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
  103. enum pci_channel_state state);
  104. static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
  105. static void ixgb_io_resume (struct pci_dev *pdev);
  106. /* Exported from other modules */
  107. extern void ixgb_check_options(struct ixgb_adapter *adapter);
/* PCI error recovery callbacks (EEH / PCIe AER) */
static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name     = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler
};
  120. MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  121. MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
  122. MODULE_LICENSE("GPL");
  123. MODULE_VERSION(DRV_VERSION);
  124. #define DEFAULT_DEBUG_LEVEL_SHIFT 3
  125. static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
  126. module_param(debug, int, 0);
  127. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  128. /* some defines for controlling descriptor fetches in h/w */
  129. #define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */
  130. #define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
  131. * this */
  132. #define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
  133. * is pushed this many descriptors
  134. * from head */
  135. /**
  136. * ixgb_init_module - Driver Registration Routine
  137. *
  138. * ixgb_init_module is the first routine called when the driver is
  139. * loaded. All it does is register with the PCI subsystem.
  140. **/
  141. static int __init
  142. ixgb_init_module(void)
  143. {
  144. printk(KERN_INFO "%s - version %s\n",
  145. ixgb_driver_string, ixgb_driver_version);
  146. printk(KERN_INFO "%s\n", ixgb_copyright);
  147. return pci_register_driver(&ixgb_driver);
  148. }
  149. module_init(ixgb_init_module);
/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.  Unregistering triggers ixgb_remove() for every
 * bound device.
 **/
static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * irq_sem counts nested disables; ixgb_irq_enable() only unmasks
 * when the count drops back to zero.
 **/
static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	/* mask every interrupt source, then flush the posted write so the
	 * mask is in effect before we wait for in-flight handlers */
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}
/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Counterpart to ixgb_irq_disable(); only the outermost enable
 * (irq_sem reaching zero) actually unmasks the hardware.
 **/
static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	if(atomic_dec_and_test(&adapter->irq_sem)) {
		/* rx timer, rx descriptor-minimum, tx done, link change */
		IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_LSC);
		IXGB_WRITE_FLUSH(&adapter->hw);
	}
}
  188. int
  189. ixgb_up(struct ixgb_adapter *adapter)
  190. {
  191. struct net_device *netdev = adapter->netdev;
  192. int err;
  193. int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
  194. struct ixgb_hw *hw = &adapter->hw;
  195. /* hardware has been reset, we need to reload some things */
  196. ixgb_rar_set(hw, netdev->dev_addr, 0);
  197. ixgb_set_multi(netdev);
  198. ixgb_restore_vlan(adapter);
  199. ixgb_configure_tx(adapter);
  200. ixgb_setup_rctl(adapter);
  201. ixgb_configure_rx(adapter);
  202. ixgb_alloc_rx_buffers(adapter);
  203. /* disable interrupts and get the hardware into a known state */
  204. IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
  205. #ifdef CONFIG_PCI_MSI
  206. {
  207. boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
  208. IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
  209. adapter->have_msi = TRUE;
  210. if (!pcix)
  211. adapter->have_msi = FALSE;
  212. else if((err = pci_enable_msi(adapter->pdev))) {
  213. DPRINTK(PROBE, ERR,
  214. "Unable to allocate MSI interrupt Error: %d\n", err);
  215. adapter->have_msi = FALSE;
  216. /* proceed to try to request regular interrupt */
  217. }
  218. }
  219. #endif
  220. if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
  221. IRQF_SHARED | IRQF_SAMPLE_RANDOM,
  222. netdev->name, netdev))) {
  223. DPRINTK(PROBE, ERR,
  224. "Unable to allocate interrupt Error: %d\n", err);
  225. return err;
  226. }
  227. if((hw->max_frame_size != max_frame) ||
  228. (hw->max_frame_size !=
  229. (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
  230. hw->max_frame_size = max_frame;
  231. IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
  232. if(hw->max_frame_size >
  233. IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
  234. uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
  235. if(!(ctrl0 & IXGB_CTRL0_JFE)) {
  236. ctrl0 |= IXGB_CTRL0_JFE;
  237. IXGB_WRITE_REG(hw, CTRL0, ctrl0);
  238. }
  239. }
  240. }
  241. mod_timer(&adapter->watchdog_timer, jiffies);
  242. #ifdef CONFIG_IXGB_NAPI
  243. netif_poll_enable(netdev);
  244. #endif
  245. ixgb_irq_enable(adapter);
  246. return 0;
  247. }
/**
 * ixgb_down - quiesce the adapter (inverse of ixgb_up)
 * @adapter: board private structure
 * @kill_watchdog: TRUE to also stop the watchdog timer
 *
 * Order matters: interrupts are masked before the IRQ is freed,
 * and the rings are only cleaned after the hardware is reset so
 * the NIC can no longer DMA into the buffers being released.
 **/
void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
	if(adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);

#endif
	if(kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);
#ifdef CONFIG_IXGB_NAPI
	netif_poll_disable(netdev);
#endif
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}
/**
 * ixgb_reset - stop the MAC and re-run hardware initialization
 * @adapter: board private structure
 *
 * Failure of ixgb_init_hw() is logged but not propagated; callers
 * treat reset as best-effort.
 **/
void
ixgb_reset(struct ixgb_adapter *adapter)
{
	ixgb_adapter_stop(&adapter->hw);
	if(!ixgb_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
}
/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit
ixgb_probe(struct pci_dev *pdev,
	   const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;	/* board index shared across probes */
	unsigned long mmio_start;
	int mmio_len;
	int pci_using_dac;
	int i;
	int err;

	if((err = pci_enable_device(pdev)))
		return err;

	/* prefer 64-bit DMA; fall back to 32-bit, abort if neither works */
	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	   !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
		   (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			printk(KERN_ERR
			 "ixgb: No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
		pci_using_dac = 0;
	}

	if((err = pci_request_regions(pdev, ixgb_driver_name)))
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if(!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

	/* map the register BAR */
	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if(!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	/* find the first I/O-port BAR, if the board exposes one */
	for(i = BAR_1; i <= BAR_5; i++) {
		if(pci_resource_len(pdev, i) == 0)
			continue;
		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	/* wire up the net_device callbacks (pre-net_device_ops kernel) */
	netdev->open = &ixgb_open;
	netdev->stop = &ixgb_close;
	netdev->hard_start_xmit = &ixgb_xmit_frame;
	netdev->get_stats = &ixgb_get_stats;
	netdev->set_multicast_list = &ixgb_set_multi;
	netdev->set_mac_address = &ixgb_set_mac;
	netdev->change_mtu = &ixgb_change_mtu;
	ixgb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgb_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
	netdev->poll = &ixgb_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = ixgb_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgb_netpoll;
#endif

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	if((err = ixgb_sw_init(adapter)))
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
#ifdef NETIF_F_TSO
	netdev->features |= NETIF_F_TSO;
#endif
#ifdef NETIF_F_LLTX
	netdev->features |= NETIF_F_LLTX;
#endif

	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */

	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task,
		  (void (*)(void *))ixgb_tx_timeout_task, netdev);

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev)))
		goto err_register;

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

/* unwind in strict reverse order of acquisition */
err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}
  433. /**
  434. * ixgb_remove - Device Removal Routine
  435. * @pdev: PCI device information struct
  436. *
  437. * ixgb_remove is called by the PCI subsystem to alert the driver
  438. * that it should release a PCI device. The could be caused by a
  439. * Hot-Plug event, or because the driver is going to be removed from
  440. * memory.
  441. **/
  442. static void __devexit
  443. ixgb_remove(struct pci_dev *pdev)
  444. {
  445. struct net_device *netdev = pci_get_drvdata(pdev);
  446. struct ixgb_adapter *adapter = netdev_priv(netdev);
  447. unregister_netdev(netdev);
  448. iounmap(adapter->hw.hw_addr);
  449. pci_release_regions(pdev);
  450. free_netdev(netdev);
  451. }
/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Always returns 0; the unsupported-device case is only logged
 * since the PCI ID table should make it unreachable.
 **/
static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	/* frame size = MTU + ethernet header + FCS; rx buffers sized to it */
	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size;

	if((hw->device_id == IXGB_DEVICE_ID_82597EX)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		DPRINTK(PROBE, ERR, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	/* irq_sem starts at 1 == interrupts masked until first ixgb_irq_enable */
	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->tx_lock);

	return 0;
}
  488. /**
  489. * ixgb_open - Called when a network interface is made active
  490. * @netdev: network interface device structure
  491. *
  492. * Returns 0 on success, negative value on failure
  493. *
  494. * The open entry point is called when a network interface is made
  495. * active by the system (IFF_UP). At this point all resources needed
  496. * for transmit and receive operations are allocated, the interrupt
  497. * handler is registered with the OS, the watchdog timer is started,
  498. * and the stack is notified that the interface is ready.
  499. **/
  500. static int
  501. ixgb_open(struct net_device *netdev)
  502. {
  503. struct ixgb_adapter *adapter = netdev_priv(netdev);
  504. int err;
  505. /* allocate transmit descriptors */
  506. if((err = ixgb_setup_tx_resources(adapter)))
  507. goto err_setup_tx;
  508. /* allocate receive descriptors */
  509. if((err = ixgb_setup_rx_resources(adapter)))
  510. goto err_setup_rx;
  511. if((err = ixgb_up(adapter)))
  512. goto err_up;
  513. return 0;
  514. err_up:
  515. ixgb_free_rx_resources(adapter);
  516. err_setup_rx:
  517. ixgb_free_tx_resources(adapter);
  518. err_setup_tx:
  519. ixgb_reset(adapter);
  520. return err;
  521. }
/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* TRUE: also stop the watchdog timer */
	ixgb_down(adapter, TRUE);

	/* rings must be freed only after ixgb_down has stopped DMA */
	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}
  542. /**
  543. * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
  544. * @adapter: board private structure
  545. *
  546. * Return 0 on success, negative on failure
  547. **/
  548. int
  549. ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
  550. {
  551. struct ixgb_desc_ring *txdr = &adapter->tx_ring;
  552. struct pci_dev *pdev = adapter->pdev;
  553. int size;
  554. size = sizeof(struct ixgb_buffer) * txdr->count;
  555. txdr->buffer_info = vmalloc(size);
  556. if(!txdr->buffer_info) {
  557. DPRINTK(PROBE, ERR,
  558. "Unable to allocate transmit descriptor ring memory\n");
  559. return -ENOMEM;
  560. }
  561. memset(txdr->buffer_info, 0, size);
  562. /* round up to nearest 4K */
  563. txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
  564. IXGB_ROUNDUP(txdr->size, 4096);
  565. txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  566. if(!txdr->desc) {
  567. vfree(txdr->buffer_info);
  568. DPRINTK(PROBE, ERR,
  569. "Unable to allocate transmit descriptor memory\n");
  570. return -ENOMEM;
  571. }
  572. memset(txdr->desc, 0, txdr->size);
  573. txdr->next_to_use = 0;
  574. txdr->next_to_clean = 0;
  575. return 0;
  576. }
/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	uint32_t tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register
	 * TCE: transmit checksum enable, TXEN: enable, TPDE: pad frames */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter;
	 * IDE is only set when interrupt delay is enabled via module option */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE
		| (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
  611. /**
  612. * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
  613. * @adapter: board private structure
  614. *
  615. * Returns 0 on success, negative on failure
  616. **/
  617. int
  618. ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
  619. {
  620. struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
  621. struct pci_dev *pdev = adapter->pdev;
  622. int size;
  623. size = sizeof(struct ixgb_buffer) * rxdr->count;
  624. rxdr->buffer_info = vmalloc(size);
  625. if(!rxdr->buffer_info) {
  626. DPRINTK(PROBE, ERR,
  627. "Unable to allocate receive descriptor ring\n");
  628. return -ENOMEM;
  629. }
  630. memset(rxdr->buffer_info, 0, size);
  631. /* Round up to nearest 4K */
  632. rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
  633. IXGB_ROUNDUP(rxdr->size, 4096);
  634. rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  635. if(!rxdr->desc) {
  636. vfree(rxdr->buffer_info);
  637. DPRINTK(PROBE, ERR,
  638. "Unable to allocate receive descriptors\n");
  639. return -ENOMEM;
  640. }
  641. memset(rxdr->desc, 0, rxdr->size);
  642. rxdr->next_to_clean = 0;
  643. rxdr->next_to_use = 0;
  644. return 0;
  645. }
  646. /**
  647. * ixgb_setup_rctl - configure the receive control register
  648. * @adapter: Board private structure
  649. **/
  650. static void
  651. ixgb_setup_rctl(struct ixgb_adapter *adapter)
  652. {
  653. uint32_t rctl;
  654. rctl = IXGB_READ_REG(&adapter->hw, RCTL);
  655. rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
  656. rctl |=
  657. IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
  658. IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
  659. (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
  660. rctl |= IXGB_RCTL_SECRC;
  661. if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
  662. rctl |= IXGB_RCTL_BSIZE_2048;
  663. else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
  664. rctl |= IXGB_RCTL_BSIZE_4096;
  665. else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
  666. rctl |= IXGB_RCTL_BSIZE_8192;
  667. else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
  668. rctl |= IXGB_RCTL_BSIZE_16384;
  669. IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
  670. }
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset: descriptor ring
 * base/length, head/tail, prefetch thresholds, checksum offload,
 * then restore the receive-control register saved at entry.
 **/
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t rxdctl;

	/* make sure receives are disabled while setting up the descriptors */
	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */
	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring;
	 * rx_ring.dma can be either a 32 or 64 bit value */
	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* set up pre-fetching of receive buffers so we get some before we
	 * run out (default hardware behavior is to run out before fetching
	 * more). This sets up to fetch if HTHRESH rx descriptors are avail
	 * and the descriptors in hw cache are below PTHRESH. This avoids
	 * the hardware behavior of fetching <=512 descriptors in a single
	 * burst that pre-empts all other activity, usually causing fifo
	 * overflows. */
	/* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
	rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
		 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
		 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if(adapter->rx_csum == TRUE) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives: write back the RCTL value read above (it still
	 * carries whatever RXEN state ixgb_setup_rctl programmed) */
	IXGB_WRITE_REG(hw, RCTL, rctl);
}
  719. /**
  720. * ixgb_free_tx_resources - Free Tx Resources
  721. * @adapter: board private structure
  722. *
  723. * Free all transmit software resources
  724. **/
  725. void
  726. ixgb_free_tx_resources(struct ixgb_adapter *adapter)
  727. {
  728. struct pci_dev *pdev = adapter->pdev;
  729. ixgb_clean_tx_ring(adapter);
  730. vfree(adapter->tx_ring.buffer_info);
  731. adapter->tx_ring.buffer_info = NULL;
  732. pci_free_consistent(pdev, adapter->tx_ring.size,
  733. adapter->tx_ring.desc, adapter->tx_ring.dma);
  734. adapter->tx_ring.desc = NULL;
  735. }
/* Release the DMA mapping and sk_buff (if any) held by one Tx
 * buffer_info slot and reset its bookkeeping fields.
 *
 * NOTE(review): the linear part of a frame is mapped with
 * pci_map_single() in ixgb_tx_map() but every slot is unmapped here
 * with pci_unmap_page(); equivalent on most platforms, but map/unmap
 * pairing should be symmetric — confirm.
 */
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;

	if (buffer_info->dma)
		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
			       PCI_DMA_TODEVICE);

	if (buffer_info->skb)
		dev_kfree_skb_any(buffer_info->skb);

	buffer_info->skb = NULL;
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}
  753. /**
  754. * ixgb_clean_tx_ring - Free Tx Buffers
  755. * @adapter: board private structure
  756. **/
  757. static void
  758. ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
  759. {
  760. struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
  761. struct ixgb_buffer *buffer_info;
  762. unsigned long size;
  763. unsigned int i;
  764. /* Free all the Tx ring sk_buffs */
  765. for(i = 0; i < tx_ring->count; i++) {
  766. buffer_info = &tx_ring->buffer_info[i];
  767. ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
  768. }
  769. size = sizeof(struct ixgb_buffer) * tx_ring->count;
  770. memset(tx_ring->buffer_info, 0, size);
  771. /* Zero out the descriptor ring */
  772. memset(tx_ring->desc, 0, tx_ring->size);
  773. tx_ring->next_to_use = 0;
  774. tx_ring->next_to_clean = 0;
  775. IXGB_WRITE_REG(&adapter->hw, TDH, 0);
  776. IXGB_WRITE_REG(&adapter->hw, TDT, 0);
  777. }
  778. /**
  779. * ixgb_free_rx_resources - Free Rx Resources
  780. * @adapter: board private structure
  781. *
  782. * Free all receive software resources
  783. **/
  784. void
  785. ixgb_free_rx_resources(struct ixgb_adapter *adapter)
  786. {
  787. struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
  788. struct pci_dev *pdev = adapter->pdev;
  789. ixgb_clean_rx_ring(adapter);
  790. vfree(rx_ring->buffer_info);
  791. rx_ring->buffer_info = NULL;
  792. pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
  793. rx_ring->desc = NULL;
  794. }
  795. /**
  796. * ixgb_clean_rx_ring - Free Rx Buffers
  797. * @adapter: board private structure
  798. **/
  799. static void
  800. ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
  801. {
  802. struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
  803. struct ixgb_buffer *buffer_info;
  804. struct pci_dev *pdev = adapter->pdev;
  805. unsigned long size;
  806. unsigned int i;
  807. /* Free all the Rx ring sk_buffs */
  808. for(i = 0; i < rx_ring->count; i++) {
  809. buffer_info = &rx_ring->buffer_info[i];
  810. if(buffer_info->skb) {
  811. pci_unmap_single(pdev,
  812. buffer_info->dma,
  813. buffer_info->length,
  814. PCI_DMA_FROMDEVICE);
  815. dev_kfree_skb(buffer_info->skb);
  816. buffer_info->skb = NULL;
  817. }
  818. }
  819. size = sizeof(struct ixgb_buffer) * rx_ring->count;
  820. memset(rx_ring->buffer_info, 0, size);
  821. /* Zero out the descriptor ring */
  822. memset(rx_ring->desc, 0, rx_ring->size);
  823. rx_ring->next_to_clean = 0;
  824. rx_ring->next_to_use = 0;
  825. IXGB_WRITE_REG(&adapter->hw, RDH, 0);
  826. IXGB_WRITE_REG(&adapter->hw, RDT, 0);
  827. }
  828. /**
  829. * ixgb_set_mac - Change the Ethernet Address of the NIC
  830. * @netdev: network interface device structure
  831. * @p: pointer to an address structure
  832. *
  833. * Returns 0 on success, negative on failure
  834. **/
  835. static int
  836. ixgb_set_mac(struct net_device *netdev, void *p)
  837. {
  838. struct ixgb_adapter *adapter = netdev_priv(netdev);
  839. struct sockaddr *addr = p;
  840. if(!is_valid_ether_addr(addr->sa_data))
  841. return -EADDRNOTAVAIL;
  842. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  843. ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
  844. return 0;
  845. }
  846. /**
  847. * ixgb_set_multi - Multicast and Promiscuous mode set
  848. * @netdev: network interface device structure
  849. *
  850. * The set_multi entry point is called whenever the multicast address
  851. * list or the network interface flags are updated. This routine is
  852. * responsible for configuring the hardware for proper multicast,
  853. * promiscuous mode, and all-multi behavior.
  854. **/
  855. static void
  856. ixgb_set_multi(struct net_device *netdev)
  857. {
  858. struct ixgb_adapter *adapter = netdev_priv(netdev);
  859. struct ixgb_hw *hw = &adapter->hw;
  860. struct dev_mc_list *mc_ptr;
  861. uint32_t rctl;
  862. int i;
  863. /* Check for Promiscuous and All Multicast modes */
  864. rctl = IXGB_READ_REG(hw, RCTL);
  865. if(netdev->flags & IFF_PROMISC) {
  866. rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
  867. } else if(netdev->flags & IFF_ALLMULTI) {
  868. rctl |= IXGB_RCTL_MPE;
  869. rctl &= ~IXGB_RCTL_UPE;
  870. } else {
  871. rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
  872. }
  873. if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
  874. rctl |= IXGB_RCTL_MPE;
  875. IXGB_WRITE_REG(hw, RCTL, rctl);
  876. } else {
  877. uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
  878. IXGB_WRITE_REG(hw, RCTL, rctl);
  879. for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
  880. i++, mc_ptr = mc_ptr->next)
  881. memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
  882. mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
  883. ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
  884. }
  885. }
/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Runs every two seconds: refreshes link state (carrier and Tx queue),
 * updates statistics, schedules a reset if Tx work is stranded without
 * link, forces hung-controller detection, and re-arms itself.
 **/
static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if(adapter->hw.link_up) {
		if(!netif_carrier_ok(netdev)) {
			/* link just came up; the driver always reports
			 * 10G full duplex for this device */
			DPRINTK(LINK, INFO,
			        "NIC Link is Up 10000 Mbps Full Duplex\n");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if(netif_carrier_ok(netdev)) {
			/* link just dropped */
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if(!netif_carrier_ok(netdev)) {
		if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
  936. #define IXGB_TX_FLAGS_CSUM 0x00000001
  937. #define IXGB_TX_FLAGS_VLAN 0x00000002
  938. #define IXGB_TX_FLAGS_TSO 0x00000004
/* Set up a TSO context descriptor when the skb needs segmentation.
 *
 * Returns 1 if a context descriptor was queued, 0 when no TSO is
 * required (or the kernel lacks NETIF_F_TSO), negative errno on
 * failure to un-clone the header.
 */
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	uint16_t ipcse, tucse, mss;
	int err;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;

		/* the headers are modified below, so they must not be
		 * shared with a clone */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		/* total header length: bytes up to the TCP header plus
		 * the TCP header itself (doff counts 32-bit words) */
		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->gso_size;

		/* clear IP total length/checksum and seed the TCP checksum
		 * with the pseudo-header sum; hardware fills in the real
		 * per-segment values */
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
						      skb->nh.iph->daddr,
						      0, IPPROTO_TCP, 0);

		/* byte offsets (from packet start) locating the IP and TCP
		 * checksum regions for the hardware */
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
		ipcse = skb->h.raw - skb->data - 1;
		tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		/* the payload length (total minus headers) rides in the
		 * low bits of cmd_type_len */
		context_desc->cmd_type_len = cpu_to_le32(
						IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}
#endif

	return 0;
}
/* Queue a checksum-offload context descriptor when the stack requested
 * partial checksumming (CHECKSUM_PARTIAL).
 *
 * Returns TRUE if a context descriptor was queued, FALSE otherwise.
 */
static boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t css, cso;

	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;

		/* checksum start and checksum-field offset, both relative
		 * to the beginning of the packet */
		css = skb->h.raw - skb->data;
		cso = (skb->h.raw + skb->csum) - skb->data;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction:
		 * clears ipcss/ipcso/ipcse together */
		*(uint32_t *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}
  1026. #define IXGB_MAX_TXD_PWR 14
  1027. #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
/* DMA-map a frame for transmit: the linear data area first, then each
 * page fragment, splitting any piece larger than IXGB_MAX_DATA_PER_TXD
 * across multiple buffer_info slots.
 *
 * Returns the number of slots consumed. The skb pointer is attached to
 * the LAST slot (freed when the whole frame completes) and
 * buffer_info[first].next_to_watch records that last index for
 * ixgb_clean_tx_irq().
 *
 * NOTE(review): the linear area is mapped with pci_map_single() but
 * ixgb_unmap_and_free_tx_resource() unmaps every slot with
 * pci_unmap_page() — equivalent on most platforms, but the pairing
 * should be symmetric; confirm.
 */
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	/* only the linear part is mapped here; data_len lives in frags */
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while(len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if(++i == tx_ring->count) i = 0;
	}

	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);
			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     frag->page_offset + offset,
					     size,
					     PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
			if(++i == tx_ring->count) i = 0;
		}
	}

	/* step back to the last slot actually used and park the skb there */
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
/* Write `count` Tx descriptors (slots already mapped by ixgb_tx_map)
 * into the ring and hand them to the hardware by bumping the tail.
 */
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	uint32_t cmd_type_len = adapter->tx_cmd_type;
	uint8_t status = 0;
	uint8_t popts = 0;
	unsigned int i;

	/* translate the tx_flags gathered in ixgb_xmit_frame into
	 * descriptor command and packet-option bits */
	if(tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if(tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if(tx_flags & IXGB_TX_FLAGS_VLAN) {
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}

	i = tx_ring->next_to_use;

	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);
		if(++i == tx_ring->count) i = 0;
	}

	/* only the last descriptor of the frame carries EOP and RS */
	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
					     | IXGB_TX_DESC_CMD_RS );

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
  1126. /* Tx Descriptors needed, worst case */
  1127. #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
  1128. (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
  1129. #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
  1130. MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
/* Main transmit entry point (hard_start_xmit).
 *
 * Returns NETDEV_TX_OK on success or drop, NETDEV_TX_BUSY when the
 * ring cannot hold a worst-case frame, and — with NETIF_F_LLTX — 
 * NETDEV_TX_LOCKED on a tx_lock collision.
 */
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags;
	int vlan_id = 0;
	int tso;

	/* empty frame: drop it and report success */
	if(skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return 0;
	}

#ifdef NETIF_F_LLTX
	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
#else
	spin_lock_irqsave(&adapter->tx_lock, flags);
#endif

	/* DESC_NEEDED is the worst-case descriptor count for one frame;
	 * stop the queue if the ring cannot hold it */
	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

#ifndef NETIF_F_LLTX
	spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

	if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		/* header un-cloning failed; the frame cannot be sent */
		dev_kfree_skb_any(skb);
#ifdef NETIF_F_LLTX
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if(ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);

	netdev->trans_start = jiffies;

#ifdef NETIF_F_LLTX
	/* Make sure there is space in the ring for the next send. */
	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

	return NETDEV_TX_OK;
}
  1190. /**
  1191. * ixgb_tx_timeout - Respond to a Tx Hang
  1192. * @netdev: network interface device structure
  1193. **/
  1194. static void
  1195. ixgb_tx_timeout(struct net_device *netdev)
  1196. {
  1197. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1198. /* Do the reset outside of interrupt context */
  1199. schedule_work(&adapter->tx_timeout_task);
  1200. }
  1201. static void
  1202. ixgb_tx_timeout_task(struct net_device *netdev)
  1203. {
  1204. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1205. adapter->tx_timeout_count++;
  1206. ixgb_down(adapter, TRUE);
  1207. ixgb_up(adapter);
  1208. }
/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback
 * (ixgb_watchdog -> ixgb_update_stats); no hardware access here.
 **/
static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	return &adapter->net_stats;
}
  1222. /**
  1223. * ixgb_change_mtu - Change the Maximum Transfer Unit
  1224. * @netdev: network interface device structure
  1225. * @new_mtu: new value for maximum frame size
  1226. *
  1227. * Returns 0 on success, negative on failure
  1228. **/
  1229. static int
  1230. ixgb_change_mtu(struct net_device *netdev, int new_mtu)
  1231. {
  1232. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1233. int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
  1234. int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
  1235. if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
  1236. || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
  1237. DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
  1238. return -EINVAL;
  1239. }
  1240. adapter->rx_buffer_len = max_frame;
  1241. netdev->mtu = new_mtu;
  1242. if ((old_max_frame != max_frame) && netif_running(netdev)) {
  1243. ixgb_down(adapter, TRUE);
  1244. ixgb_up(adapter);
  1245. }
  1246. return 0;
  1247. }
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 *
 * Accumulates the hardware statistics registers into adapter->stats
 * (the "+=" pattern suggests the counters clear on read — TODO
 * confirm against the 82597 datasheet), then derives the OS-visible
 * net_stats from them.
 **/
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
		return;

	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if(multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}

	/* receive-side counters */
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);

	/* transmit-side counters */
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */ +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	/* this hardware exposes no Tx error counters the OS structure maps */
	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}
  1366. #define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Reads (and thereby acknowledges — presumably read-to-clear, per the
 * "+icr==0 means not ours" pattern; confirm against datasheet) the
 * cause register, kicks the watchdog on link events, then either
 * schedules NAPI polling or cleans the rings inline.
 **/
static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	/* no cause bits set: the interrupt came from another device on a
	 * shared line */
	if(unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
		/* link event: run the watchdog immediately */
		mod_timer(&adapter->watchdog_timer, jiffies);
	}

#ifdef CONFIG_IXGB_NAPI
	if(netif_rx_schedule_prep(netdev)) {
		/* Disable interrupts and register for poll. The flush
		   of the posted write is intentionally left out.
		*/
		atomic_inc(&adapter->irq_sem);
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev);
	}
#else
	/* yes, that is actually a & and it is meant to make sure that
	 * every pass through this for loop checks both receive and
	 * transmit queues for completed descriptors, intended to
	 * avoid starvation issues and assist tx/rx fairness. */
	for(i = 0; i < IXGB_MAX_INTR; i++)
		if(!ixgb_clean_rx_irq(adapter) &
		   !ixgb_clean_tx_irq(adapter))
			break;
#endif
	return IRQ_HANDLED;
}
  1408. #ifdef CONFIG_IXGB_NAPI
  1409. /**
  1410. * ixgb_clean - NAPI Rx polling callback
  1411. * @adapter: board private structure
  1412. **/
  1413. static int
  1414. ixgb_clean(struct net_device *netdev, int *budget)
  1415. {
  1416. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1417. int work_to_do = min(*budget, netdev->quota);
  1418. int tx_cleaned;
  1419. int work_done = 0;
  1420. tx_cleaned = ixgb_clean_tx_irq(adapter);
  1421. ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
  1422. *budget -= work_done;
  1423. netdev->quota -= work_done;
  1424. /* if no Tx and not enough Rx work done, exit the polling mode */
  1425. if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
  1426. netif_rx_complete(netdev);
  1427. ixgb_irq_enable(adapter);
  1428. return 0;
  1429. }
  1430. return 1;
  1431. }
  1432. #endif
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * Walks the Tx ring from next_to_clean, freeing the DMA mapping and
 * skb of every descriptor belonging to packets the hardware has
 * completed (DD set on the end-of-packet descriptor).  Wakes a stopped
 * Tx queue once enough descriptors are free, and reports a Tx unit
 * hang if a mapped buffer has been pending for over a second while the
 * transmitter is not flow-control paused.
 *
 * Returns TRUE if at least one descriptor was cleaned.
 **/
static boolean_t
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	boolean_t cleaned = FALSE;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	/* DD on the end-of-packet descriptor means the whole packet is done */
	while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		/* release every descriptor of the packet, up to and incl. eop */
		for(cleaned = FALSE; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			/* count packets that had checksum offload requested */
			if (tx_desc->popts
			    & (IXGB_TX_DESC_POPTS_TXSM |
			       IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			/* clear status (and adjacent bytes) with one 32-bit store */
			*(uint32_t *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if(++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(netif_queue_stopped(netdev))) {
		spin_lock(&adapter->tx_lock);
		/* re-check under the lock; wake only with link up and room
		 * for a worst-case packet */
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
		    (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
			netif_wake_queue(netdev);
		spin_unlock(&adapter->tx_lock);
	}

	if(adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = FALSE;
		if (tx_ring->buffer_info[eop].dma &&
		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
					" TDH <%x>\n"
					" TDT <%x>\n"
					" next_to_use <%x>\n"
					" next_to_clean <%x>\n"
					"buffer_info[next_to_clean]\n"
					" time_stamp <%lx>\n"
					" next_to_watch <%x>\n"
					" jiffies <%lx>\n"
					" next_to_watch.status <%x>\n",
				IXGB_READ_REG(&adapter->hw, TDH),
				IXGB_READ_REG(&adapter->hw, TDT),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
  1505. /**
  1506. * ixgb_rx_checksum - Receive Checksum Offload for 82597.
  1507. * @adapter: board private structure
  1508. * @rx_desc: receive descriptor
  1509. * @sk_buff: socket buffer with received data
  1510. **/
  1511. static void
  1512. ixgb_rx_checksum(struct ixgb_adapter *adapter,
  1513. struct ixgb_rx_desc *rx_desc,
  1514. struct sk_buff *skb)
  1515. {
  1516. /* Ignore Checksum bit is set OR
  1517. * TCP Checksum has not been calculated
  1518. */
  1519. if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
  1520. (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
  1521. skb->ip_summed = CHECKSUM_NONE;
  1522. return;
  1523. }
  1524. /* At this point we know the hardware did the TCP checksum */
  1525. /* now look at the TCP checksum error bit */
  1526. if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
  1527. /* let the stack verify checksum errors */
  1528. skb->ip_summed = CHECKSUM_NONE;
  1529. adapter->hw_csum_rx_error++;
  1530. } else {
  1531. /* TCP checksum is good */
  1532. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1533. adapter->hw_csum_rx_good++;
  1534. }
  1535. }
/**
 * ixgb_clean_rx_irq - Send received data up the network stack,
 * @adapter: board private structure
 *
 * Walks the Rx ring from next_to_clean, handing each completed
 * (DD-set) descriptor's buffer to the stack.  Multi-buffer packets and
 * packets with CRC/symbol/parity/RX errors are dropped.  Small frames
 * (< IXGB_CB_LENGTH) are copied into a right-sized skb so the large
 * receive buffer can be recycled.  The ring is replenished at the end
 * via ixgb_alloc_rx_buffers().
 *
 * Returns TRUE if at least one descriptor was cleaned.  Under NAPI,
 * @work_done is incremented per packet and the walk stops once
 * @work_to_do is reached.
 **/
static boolean_t
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	uint32_t length;
	unsigned int i, j;
	boolean_t cleaned = FALSE;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb, *next_skb;
		u8 status;

#ifdef CONFIG_IXGB_NAPI
		if(*work_done >= work_to_do)
			break;

		(*work_done)++;
#endif
		/* snapshot status: the descriptor is zeroed at rxdesc_done */
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data);

		if(++i == rx_ring->count) i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		/* also prefetch the buffer_info two entries ahead */
		if((j = i + 1) == rx_ring->count) j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];
		next_skb = next_buffer->skb;
		prefetch(next_skb);

		cleaned = TRUE;

		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		length = le16_to_cpu(rx_desc->length);

		if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers "
					 "length<%x>\n", length);

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* drop frames with physical-layer errors */
		if (unlikely(rx_desc->errors
			     & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
				| IXGB_RX_DESC_ERRORS_P |
				IXGB_RX_DESC_ERRORS_RXE))) {

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
#define IXGB_CB_LENGTH 256
		if (length < IXGB_CB_LENGTH) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				memcpy(new_skb->data - NET_IP_ALIGN,
				       skb->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
		}
		/* end copybreak code */

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
				le16_to_cpu(rx_desc->special) &
					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_IXGB_NAPI */
		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
				le16_to_cpu(rx_desc->special) &
					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_IXGB_NAPI */
		netdev->last_rx = jiffies;

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	ixgb_alloc_rx_buffers(adapter);

	return cleaned;
}
  1651. /**
  1652. * ixgb_alloc_rx_buffers - Replace used receive buffers
  1653. * @adapter: address of board private structure
  1654. **/
  1655. static void
  1656. ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
  1657. {
  1658. struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
  1659. struct net_device *netdev = adapter->netdev;
  1660. struct pci_dev *pdev = adapter->pdev;
  1661. struct ixgb_rx_desc *rx_desc;
  1662. struct ixgb_buffer *buffer_info;
  1663. struct sk_buff *skb;
  1664. unsigned int i;
  1665. int num_group_tail_writes;
  1666. long cleancount;
  1667. i = rx_ring->next_to_use;
  1668. buffer_info = &rx_ring->buffer_info[i];
  1669. cleancount = IXGB_DESC_UNUSED(rx_ring);
  1670. num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
  1671. /* leave three descriptors unused */
  1672. while(--cleancount > 2) {
  1673. /* recycle! its good for you */
  1674. skb = buffer_info->skb;
  1675. if (skb) {
  1676. skb_trim(skb, 0);
  1677. goto map_skb;
  1678. }
  1679. skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
  1680. + NET_IP_ALIGN);
  1681. if (unlikely(!skb)) {
  1682. /* Better luck next round */
  1683. adapter->alloc_rx_buff_failed++;
  1684. break;
  1685. }
  1686. /* Make buffer alignment 2 beyond a 16 byte boundary
  1687. * this will result in a 16 byte aligned IP header after
  1688. * the 14 byte MAC header is removed
  1689. */
  1690. skb_reserve(skb, NET_IP_ALIGN);
  1691. buffer_info->skb = skb;
  1692. buffer_info->length = adapter->rx_buffer_len;
  1693. map_skb:
  1694. buffer_info->dma = pci_map_single(pdev,
  1695. skb->data,
  1696. adapter->rx_buffer_len,
  1697. PCI_DMA_FROMDEVICE);
  1698. rx_desc = IXGB_RX_DESC(*rx_ring, i);
  1699. rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
  1700. /* guarantee DD bit not set now before h/w gets descriptor
  1701. * this is the rest of the workaround for h/w double
  1702. * writeback. */
  1703. rx_desc->status = 0;
  1704. if(++i == rx_ring->count) i = 0;
  1705. buffer_info = &rx_ring->buffer_info[i];
  1706. }
  1707. if (likely(rx_ring->next_to_use != i)) {
  1708. rx_ring->next_to_use = i;
  1709. if (unlikely(i-- == 0))
  1710. i = (rx_ring->count - 1);
  1711. /* Force memory writes to complete before letting h/w
  1712. * know there are new descriptors to fetch. (Only
  1713. * applicable for weak-ordered memory model archs, such
  1714. * as IA-64). */
  1715. wmb();
  1716. IXGB_WRITE_REG(&adapter->hw, RDT, i);
  1717. }
  1718. }
  1719. /**
  1720. * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
  1721. *
  1722. * @param netdev network interface device structure
  1723. * @param grp indicates to enable or disable tagging/stripping
  1724. **/
  1725. static void
  1726. ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
  1727. {
  1728. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1729. uint32_t ctrl, rctl;
  1730. ixgb_irq_disable(adapter);
  1731. adapter->vlgrp = grp;
  1732. if(grp) {
  1733. /* enable VLAN tag insert/strip */
  1734. ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
  1735. ctrl |= IXGB_CTRL0_VME;
  1736. IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
  1737. /* enable VLAN receive filtering */
  1738. rctl = IXGB_READ_REG(&adapter->hw, RCTL);
  1739. rctl |= IXGB_RCTL_VFE;
  1740. rctl &= ~IXGB_RCTL_CFIEN;
  1741. IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
  1742. } else {
  1743. /* disable VLAN tag insert/strip */
  1744. ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
  1745. ctrl &= ~IXGB_CTRL0_VME;
  1746. IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
  1747. /* disable VLAN filtering */
  1748. rctl = IXGB_READ_REG(&adapter->hw, RCTL);
  1749. rctl &= ~IXGB_RCTL_VFE;
  1750. IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
  1751. }
  1752. ixgb_irq_enable(adapter);
  1753. }
  1754. static void
  1755. ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
  1756. {
  1757. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1758. uint32_t vfta, index;
  1759. /* add VID to filter table */
  1760. index = (vid >> 5) & 0x7F;
  1761. vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  1762. vfta |= (1 << (vid & 0x1F));
  1763. ixgb_write_vfta(&adapter->hw, index, vfta);
  1764. }
  1765. static void
  1766. ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
  1767. {
  1768. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1769. uint32_t vfta, index;
  1770. ixgb_irq_disable(adapter);
  1771. if(adapter->vlgrp)
  1772. adapter->vlgrp->vlan_devices[vid] = NULL;
  1773. ixgb_irq_enable(adapter);
  1774. /* remove VID from filter table*/
  1775. index = (vid >> 5) & 0x7F;
  1776. vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  1777. vfta &= ~(1 << (vid & 0x1F));
  1778. ixgb_write_vfta(&adapter->hw, index, vfta);
  1779. }
  1780. static void
  1781. ixgb_restore_vlan(struct ixgb_adapter *adapter)
  1782. {
  1783. ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
  1784. if(adapter->vlgrp) {
  1785. uint16_t vid;
  1786. for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
  1787. if(!adapter->vlgrp->vlan_devices[vid])
  1788. continue;
  1789. ixgb_vlan_rx_add_vid(adapter->netdev, vid);
  1790. }
  1791. }
  1792. }
  1793. #ifdef CONFIG_NET_POLL_CONTROLLER
  1794. /*
  1795. * Polling 'interrupt' - used by things like netconsole to send skbs
  1796. * without having to re-enable interrupts. It's not called while
  1797. * the interrupt routine is executing.
  1798. */
  1799. static void ixgb_netpoll(struct net_device *dev)
  1800. {
  1801. struct ixgb_adapter *adapter = netdev_priv(dev);
  1802. disable_irq(adapter->pdev->irq);
  1803. ixgb_intr(adapter->pdev->irq, dev);
  1804. enable_irq(adapter->pdev->irq);
  1805. }
  1806. #endif
  1807. /**
  1808. * ixgb_io_error_detected() - called when PCI error is detected
  1809. * @pdev pointer to pci device with error
  1810. * @state pci channel state after error
  1811. *
  1812. * This callback is called by the PCI subsystem whenever
  1813. * a PCI bus error is detected.
  1814. */
  1815. static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
  1816. enum pci_channel_state state)
  1817. {
  1818. struct net_device *netdev = pci_get_drvdata(pdev);
  1819. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1820. if(netif_running(netdev))
  1821. ixgb_down(adapter, TRUE);
  1822. pci_disable_device(pdev);
  1823. /* Request a slot reset. */
  1824. return PCI_ERS_RESULT_NEED_RESET;
  1825. }
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev pointer to pci device with error
 *
 * This callback is called after the PCI buss has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success, or
 * PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled or its
 * EEPROM/MAC address is no longer valid after the reset.
 */
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if(pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC (pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);
	/* keep the stack off the device while it re-initializes */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* re-read and sanity-check the permanent MAC address */
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
  1863. /**
  1864. * ixgb_io_resume - called when its OK to resume normal operations
  1865. * @pdev pointer to pci device with error
  1866. *
  1867. * The error recovery driver tells us that its OK to resume
  1868. * normal operation. Implementation resembles the second-half
  1869. * of the ixgb_probe() routine.
  1870. */
  1871. static void ixgb_io_resume (struct pci_dev *pdev)
  1872. {
  1873. struct net_device *netdev = pci_get_drvdata(pdev);
  1874. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1875. pci_set_master(pdev);
  1876. if(netif_running(netdev)) {
  1877. if(ixgb_up(adapter)) {
  1878. printk ("ixgb: can't bring device back up after reset\n");
  1879. return;
  1880. }
  1881. }
  1882. netif_device_attach(netdev);
  1883. mod_timer(&adapter->watchdog_timer, jiffies);
  1884. }
  1885. /* ixgb_main.c */