ixgb_main.c
/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "1.0.126-k4" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
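/*
 * Added note (not in the original source): MODULE_DEVICE_TABLE() exports the
 * table above as module alias information, which is what lets hotplug/udev
 * autoload this driver when a PCI function with a matching vendor/device ID
 * appears on the bus.
 */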
/* Local Function Prototypes */

int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
void ixgb_update_stats(struct ixgb_adapter *adapter);

static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct napi_struct *napi, int budget);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
                              int *work_done, int work_to_do);
#else
static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
                                  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers ixgb_err_handler = {
        .error_detected = ixgb_io_error_detected,
        .slot_reset = ixgb_io_slot_reset,
        .resume = ixgb_io_resume,
};
static struct pci_driver ixgb_driver = {
        .name     = ixgb_driver_name,
        .id_table = ixgb_pci_tbl,
        .probe    = ixgb_probe,
        .remove   = __devexit_p(ixgb_remove),
        .err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_WTHRESH_DEFAULT 15  /* chip writes back at this many or RXT0 */
#define RXDCTL_PTHRESH_DEFAULT 0   /* chip considers prefetch below
                                    * this */
#define RXDCTL_HTHRESH_DEFAULT 0   /* chip will only prefetch if tail
                                    * is pushed this many descriptors
                                    * from head */
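/*
 * Added note (not in the original source): ixgb_configure_rx() below combines
 * these defaults into the RXDCTL register, i.e.
 *
 *      rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
 *               RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
 *               RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
 *
 * so completed descriptors are written back in bursts of 15 while the
 * prefetch thresholds stay at their hardware minimum.
 */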
/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
        printk(KERN_INFO "%s - version %s\n",
               ixgb_driver_string, ixgb_driver_version);

        printk(KERN_INFO "%s\n", ixgb_copyright);

        return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
        pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        IXGB_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
        u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
                  IXGB_INT_TXDW | IXGB_INT_LSC;
        if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
                val |= IXGB_INT_GPI0;
        IXGB_WRITE_REG(&adapter->hw, IMS, val);
        IXGB_WRITE_FLUSH(&adapter->hw);
}
int
ixgb_up(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err, irq_flags = IRQF_SHARED;
        int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        struct ixgb_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */

        ixgb_rar_set(hw, netdev->dev_addr, 0);
        ixgb_set_multi(netdev);

        ixgb_restore_vlan(adapter);

        ixgb_configure_tx(adapter);
        ixgb_setup_rctl(adapter);
        ixgb_configure_rx(adapter);
        ixgb_alloc_rx_buffers(adapter);

        /* disable interrupts and get the hardware into a known state */
        IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

        /* only enable MSI if bus is in PCI-X mode */
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
                        adapter->have_msi = 1;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
        }

        err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
                          netdev->name, netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                DPRINTK(PROBE, ERR,
                        "Unable to allocate interrupt Error: %d\n", err);
                return err;
        }

        if ((hw->max_frame_size != max_frame) ||
            (hw->max_frame_size !=
             (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

                hw->max_frame_size = max_frame;

                IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

                if (hw->max_frame_size >
                    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                        u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

                        if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                                ctrl0 |= IXGB_CTRL0_JFE;
                                IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                        }
                }
        }

        clear_bit(__IXGB_DOWN, &adapter->flags);

#ifdef CONFIG_IXGB_NAPI
        napi_enable(&adapter->napi);
#endif
        ixgb_irq_enable(adapter);

        mod_timer(&adapter->watchdog_timer, jiffies);

        return 0;
}
void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
        struct net_device *netdev = adapter->netdev;

        /* prevent the interrupt handler from restarting watchdog */
        set_bit(__IXGB_DOWN, &adapter->flags);

#ifdef CONFIG_IXGB_NAPI
        napi_disable(&adapter->napi);
#endif
        /* waiting for NAPI to complete can re-enable interrupts */
        ixgb_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);

        if (kill_watchdog)
                del_timer_sync(&adapter->watchdog_timer);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        ixgb_reset(adapter);
        ixgb_clean_tx_ring(adapter);
        ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;

        ixgb_adapter_stop(hw);
        if (!ixgb_init_hw(hw))
                DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");

        /* restore frame size information */
        IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
        if (hw->max_frame_size >
            IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
                if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                        ctrl0 |= IXGB_CTRL0_JFE;
                        IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                }
        }
}
/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev = NULL;
        struct ixgb_adapter *adapter;
        static int cards_found = 0;
        int pci_using_dac;
        int i;
        int err;

        if ((err = pci_enable_device(pdev)))
                return err;

        if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
            !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
                if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
                        printk(KERN_ERR
                               "ixgb: No usable DMA configuration, aborting\n");
                        goto err_dma_mask;
                }
                pci_using_dac = 0;
        }

        if ((err = pci_request_regions(pdev, ixgb_driver_name)))
                goto err_request_regions;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

        adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
                                      pci_resource_len(pdev, BAR_0));
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->open = &ixgb_open;
        netdev->stop = &ixgb_close;
        netdev->hard_start_xmit = &ixgb_xmit_frame;
        netdev->get_stats = &ixgb_get_stats;
        netdev->set_multicast_list = &ixgb_set_multi;
        netdev->set_mac_address = &ixgb_set_mac;
        netdev->change_mtu = &ixgb_change_mtu;
        ixgb_set_ethtool_ops(netdev);
        netdev->tx_timeout = &ixgb_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
#endif
        netdev->vlan_rx_register = ixgb_vlan_rx_register;
        netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = ixgb_netpoll;
#endif

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        /* setup the private structure */
        if ((err = ixgb_sw_init(adapter)))
                goto err_sw_init;

        netdev->features = NETIF_F_SG |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;
        netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_LLTX
        netdev->features |= NETIF_F_LLTX;
#endif

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        /* make sure the EEPROM is good */
        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &ixgb_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

        strcpy(netdev->name, "eth%d");
        if ((err = register_netdev(netdev)))
                goto err_register;

        /* we're going to reset, so assume we have no link for now */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
        ixgb_check_options(adapter);
        /* reset the hardware with the new settings */
        ixgb_reset(adapter);

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
        pci_disable_device(pdev);
        return err;
}
/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        unregister_netdev(netdev);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        free_netdev(netdev);
}
/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        adapter->rx_buffer_len = hw->max_frame_size;

        if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
                hw->mac_type = ixgb_82597;
        else {
                /* should never have loaded on this device */
                DPRINTK(PROBE, ERR, "unsupported device id\n");
        }

        /* enable flow control to be programmed */
        hw->fc.send_xon = 1;

        spin_lock_init(&adapter->tx_lock);

        set_bit(__IXGB_DOWN, &adapter->flags);
        return 0;
}
/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate transmit descriptors */
        if ((err = ixgb_setup_tx_resources(adapter)))
                goto err_setup_tx;

        /* allocate receive descriptors */
        if ((err = ixgb_setup_rx_resources(adapter)))
                goto err_setup_rx;

        if ((err = ixgb_up(adapter)))
                goto err_up;

        return 0;

err_up:
        ixgb_free_rx_resources(adapter);
err_setup_rx:
        ixgb_free_tx_resources(adapter);
err_setup_tx:
        ixgb_reset(adapter);

        return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        ixgb_down(adapter, true);

        ixgb_free_tx_resources(adapter);
        ixgb_free_rx_resources(adapter);

        return 0;
}
/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * txdr->count;
        txdr->buffer_info = vmalloc(size);
        if (!txdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                        "Unable to allocate transmit descriptor ring memory\n");
                return -ENOMEM;
        }
        memset(txdr->buffer_info, 0, size);

        /* round up to nearest 4K */

        txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
        txdr->size = ALIGN(txdr->size, 4096);

        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
        if (!txdr->desc) {
                vfree(txdr->buffer_info);
                DPRINTK(PROBE, ERR,
                        "Unable to allocate transmit descriptor memory\n");
                return -ENOMEM;
        }
        memset(txdr->desc, 0, txdr->size);

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
        u64 tdba = adapter->tx_ring.dma;
        u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
        u32 tctl;
        struct ixgb_hw *hw = &adapter->hw;

        /* Setup the Base and Length of the Tx Descriptor Ring
         * tx_ring.dma can be either a 32 or 64 bit value
         */
        IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

        IXGB_WRITE_REG(hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        IXGB_WRITE_REG(hw, TDH, 0);
        IXGB_WRITE_REG(hw, TDT, 0);

        /* don't set up txdctl, it induces performance problems if configured
         * incorrectly */
        /* Set the Tx Interrupt Delay register */

        IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

        /* Program the Transmit Control Register */

        tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
        IXGB_WRITE_REG(hw, TCTL, tctl);

        /* Setup Transmit Descriptor Settings for this adapter */
        adapter->tx_cmd_type = IXGB_TX_DESC_TYPE |
                (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * rxdr->count;
        rxdr->buffer_info = vmalloc(size);
        if (!rxdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                        "Unable to allocate receive descriptor ring\n");
                return -ENOMEM;
        }
        memset(rxdr->buffer_info, 0, size);

        /* Round up to nearest 4K */

        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);

        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                DPRINTK(PROBE, ERR,
                        "Unable to allocate receive descriptors\n");
                return -ENOMEM;
        }
        memset(rxdr->desc, 0, rxdr->size);

        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;

        return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
        u32 rctl;

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);

        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

        rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        rctl |= IXGB_RCTL_SECRC;

        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
                rctl |= IXGB_RCTL_BSIZE_2048;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
                rctl |= IXGB_RCTL_BSIZE_4096;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
                rctl |= IXGB_RCTL_BSIZE_8192;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
                rctl |= IXGB_RCTL_BSIZE_16384;

        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
        u64 rdba = adapter->rx_ring.dma;
        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
        struct ixgb_hw *hw = &adapter->hw;
        u32 rctl;
        u32 rxcsum;
        u32 rxdctl;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = IXGB_READ_REG(hw, RCTL);
        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

        /* set the Receive Delay Timer Register */

        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring */

        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

        IXGB_WRITE_REG(hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(hw, RDH, 0);
        IXGB_WRITE_REG(hw, RDT, 0);

        /* set up pre-fetching of receive buffers so we get some before we
         * run out (default hardware behavior is to run out before fetching
         * more).  This sets up to fetch if HTHRESH rx descriptors are avail
         * and the descriptors in hw cache are below PTHRESH.  This avoids
         * the hardware behavior of fetching <=512 descriptors in a single
         * burst that pre-empts all other activity, usually causing fifo
         * overflows. */
        /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
        rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
                 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
                 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
        IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

        /* Enable Receive Checksum Offload for TCP and UDP */
        if (adapter->rx_csum) {
                rxcsum = IXGB_READ_REG(hw, RXCSUM);
                rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
        }

        /* Enable Receives */

        IXGB_WRITE_REG(hw, RCTL, rctl);
}
/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_tx_ring(adapter);

        vfree(adapter->tx_ring.buffer_info);
        adapter->tx_ring.buffer_info = NULL;

        pci_free_consistent(pdev, adapter->tx_ring.size,
                            adapter->tx_ring.desc, adapter->tx_ring.dma);

        adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
        struct pci_dev *pdev = adapter->pdev;

        if (buffer_info->dma)
                pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
                               PCI_DMA_TODEVICE);

        if (buffer_info->skb)
                dev_kfree_skb_any(buffer_info->skb);

        buffer_info->skb = NULL;
        buffer_info->dma = 0;
        buffer_info->time_stamp = 0;
        /* these fields must always be initialized in tx
         * buffer_info->length = 0;
         * buffer_info->next_to_watch = 0; */
}
/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        size = sizeof(struct ixgb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

        rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct ixgb_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */

        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->skb) {
                        pci_unmap_single(pdev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         PCI_DMA_FROMDEVICE);

                        dev_kfree_skb(buffer_info->skb);

                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct ixgb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}
/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

        return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
        u32 rctl;
        int i;

        /* Check for Promiscuous and All Multicast modes */

        rctl = IXGB_READ_REG(hw, RCTL);

        if (netdev->flags & IFF_PROMISC) {
                rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
        } else if (netdev->flags & IFF_ALLMULTI) {
                rctl |= IXGB_RCTL_MPE;
                rctl &= ~IXGB_RCTL_UPE;
        } else {
                rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
        }

        if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
                rctl |= IXGB_RCTL_MPE;
                IXGB_WRITE_REG(hw, RCTL, rctl);
        } else {
                u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
                       IXGB_ETH_LENGTH_OF_ADDRESS];

                IXGB_WRITE_REG(hw, RCTL, rctl);

                for (i = 0, mc_ptr = netdev->mc_list; mc_ptr;
                     i++, mc_ptr = mc_ptr->next)
                        memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
                               mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

                ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
        }
}
/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
        struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
        struct net_device *netdev = adapter->netdev;
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;

        ixgb_check_for_link(&adapter->hw);

        if (ixgb_check_for_bad_link(&adapter->hw)) {
                /* force the reset path */
                netif_stop_queue(netdev);
        }

        if (adapter->hw.link_up) {
                if (!netif_carrier_ok(netdev)) {
                        DPRINTK(LINK, INFO,
                                "NIC Link is Up 10000 Mbps Full Duplex\n");
                        adapter->link_speed = 10000;
                        adapter->link_duplex = FULL_DUPLEX;
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        DPRINTK(LINK, INFO, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
        }

        ixgb_update_stats(adapter);

        if (!netif_carrier_ok(netdev)) {
                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                }
        }

        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;

        /* generate an interrupt to force clean up of any stragglers */
        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
#define IXGB_TX_FLAGS_CSUM 0x00000001
#define IXGB_TX_FLAGS_VLAN 0x00000002
#define IXGB_TX_FLAGS_TSO  0x00000004

static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        u16 ipcse, tucse, mss;
        int err;

        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
                struct iphdr *iph;

                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
                                return err;
                }

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
                context_desc->ipcse = cpu_to_le16(ipcse);
                context_desc->tucss = tucss;
                context_desc->tucso = tucso;
                context_desc->tucse = cpu_to_le16(tucse);
                context_desc->mss = cpu_to_le16(mss);
                context_desc->hdr_len = hdr_len;
                context_desc->status = 0;
                context_desc->cmd_type_len = cpu_to_le32(
                                                IXGB_CONTEXT_DESC_TYPE
                                                | IXGB_CONTEXT_DESC_CMD_TSE
                                                | IXGB_CONTEXT_DESC_CMD_IP
                                                | IXGB_CONTEXT_DESC_CMD_TCP
                                                | IXGB_CONTEXT_DESC_CMD_IDE
                                                | (skb->len - (hdr_len)));

                if (++i == adapter->tx_ring.count)
                        i = 0;
                adapter->tx_ring.next_to_use = i;

                return 1;
        }

        return 0;
}
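/*
 * Added note (not in the original source): ixgb_tso() has a three-way return
 * convention that its caller relies on -- 1 when a TSO context descriptor was
 * queued, 0 when the skb is not GSO at all, and a negative errno when
 * un-cloning the shared header fails; ixgb_xmit_frame() distinguishes all
 * three cases.
 */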
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
                css = skb_transport_offset(skb);
                cso = css + skb->csum_offset;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->tucss = css;
                context_desc->tucso = cso;
                context_desc->tucse = 0;
                /* zero out any previously existing data in one instruction */
                *(u32 *)&(context_desc->ipcss) = 0;
                context_desc->status = 0;
                context_desc->hdr_len = 0;
                context_desc->mss = 0;
                context_desc->cmd_type_len =
                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
                                    | IXGB_TX_DESC_CMD_IDE);

                if (++i == adapter->tx_ring.count)
                        i = 0;
                adapter->tx_ring.next_to_use = i;

                return true;
        }

        return false;
}
#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)
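/*
 * Added note (not in the original source): 1 << 14 = 16384, so a single Tx
 * descriptor can carry at most 16 KB of data; ixgb_tx_map() below splits any
 * longer linear region or page fragment across multiple descriptors.
 */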
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
            unsigned int first)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        int len = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;

        len -= skb->data_len;

        i = tx_ring->next_to_use;

        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;

                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->dma =
                        pci_map_single(adapter->pdev,
                                       skb->data + offset,
                                       size,
                                       PCI_DMA_TODEVICE);
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = 0;

                len -= size;
                offset += size;
                count++;
                if (++i == tx_ring->count)
                        i = 0;
        }

        for (f = 0; f < nr_frags; f++) {
                struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
                offset = 0;

                while (len) {
                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);

                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if (unlikely(mss && (f == (nr_frags - 1)) &&
                                     size == len && size > 8))
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->dma =
                                pci_map_page(adapter->pdev,
                                             frag->page,
                                             frag->page_offset + offset,
                                             size,
                                             PCI_DMA_TODEVICE);
                        buffer_info->time_stamp = jiffies;
                        buffer_info->next_to_watch = 0;

                        len -= size;
                        offset += size;
                        count++;
                        if (++i == tx_ring->count)
                                i = 0;
                }
        }
        i = (i == 0) ? tx_ring->count - 1 : i - 1;
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;
}
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
              int tx_flags)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_tx_desc *tx_desc = NULL;
        struct ixgb_buffer *buffer_info;
        u32 cmd_type_len = adapter->tx_cmd_type;
        u8 status = 0;
        u8 popts = 0;
        unsigned int i;

        if (tx_flags & IXGB_TX_FLAGS_TSO) {
                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
        }

        if (tx_flags & IXGB_TX_FLAGS_CSUM)
                popts |= IXGB_TX_DESC_POPTS_TXSM;

        if (tx_flags & IXGB_TX_FLAGS_VLAN)
                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

        i = tx_ring->next_to_use;

        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IXGB_TX_DESC(*tx_ring, i);
                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->status = status;
                tx_desc->popts = popts;
                tx_desc->vlan = cpu_to_le16(vlan_id);

                if (++i == tx_ring->count)
                        i = 0;
        }

        tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP |
                                             IXGB_TX_DESC_CMD_RS);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in a case another CPU has just
         * made room available. */
        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! */
        netif_start_queue(netdev);
        ++adapter->restart_queue;
        return 0;
}
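/*
 * Added commentary (not in the original source): the stop-then-recheck dance
 * above pairs with the queue wake-up performed from the Tx completion path
 * (ixgb_clean_tx_irq).  Stopping the queue first and re-reading the free
 * descriptor count only after smp_mb() closes the window in which the
 * cleanup path could free descriptors between this CPU's check and its stop,
 * which would otherwise leave the queue stopped with no one left to wake it.
 */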
static int ixgb_maybe_stop_tx(struct net_device *netdev,
                              struct ixgb_desc_ring *tx_ring, int size)
{
        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __ixgb_maybe_stop_tx(netdev, size);
}
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                          (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
        + 1 /* one more needed for sentinel TSO workaround */
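/*
 * Illustrative arithmetic (added commentary, not in the original source):
 * TXD_USE_COUNT(S) is simply ceil(S / IXGB_MAX_DATA_PER_TXD).  Assuming 4 KB
 * pages, TXD_USE_COUNT(PAGE_SIZE) == 1, so DESC_NEEDED works out to
 * 1 (linear data) + MAX_SKB_FRAGS (one per page fragment) + 1 (context
 * descriptor) + 1 (TSO sentinel); the queue is stopped whenever fewer free
 * descriptors than that remain.
 */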
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        unsigned long flags;
        int vlan_id = 0;
        int tso;

        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

#ifdef NETIF_F_LLTX
        if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
                /* Collision - tell upper layer to requeue */
                local_irq_restore(flags);
                return NETDEV_TX_LOCKED;
        }
#else
        spin_lock_irqsave(&adapter->tx_lock, flags);
#endif

        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
                                        DESC_NEEDED))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

#ifndef NETIF_F_LLTX
        spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

        if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
                vlan_id = vlan_tx_tag_get(skb);
        }

        first = adapter->tx_ring.next_to_use;

        tso = ixgb_tso(adapter, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
#ifdef NETIF_F_LLTX
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif
                return NETDEV_TX_OK;
        }

        if (likely(tso))
                tx_flags |= IXGB_TX_FLAGS_TSO;
        else if (ixgb_tx_csum(adapter, skb))
                tx_flags |= IXGB_TX_FLAGS_CSUM;

        ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
                      tx_flags);

        netdev->trans_start = jiffies;

#ifdef NETIF_F_LLTX
        /* Make sure there is space in the ring for the next send. */
        ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);

        spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

        return NETDEV_TX_OK;
}
/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
        struct ixgb_adapter *adapter =
                container_of(work, struct ixgb_adapter, tx_timeout_task);

        adapter->tx_timeout_count++;
        ixgb_down(adapter, true);
        ixgb_up(adapter);
}
/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        return &adapter->net_stats;
}
/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if ((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) ||
	    (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
		return -EINVAL;
	}

	adapter->rx_buffer_len = max_frame;

	netdev->mtu = new_mtu;

	if ((old_max_frame != max_frame) && netif_running(netdev)) {
		ixgb_down(adapter, true);
		ixgb_up(adapter);
	}

	return 0;
}
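/* Worked example for the frame-size math in ixgb_change_mtu() above:
 * a standard 1500-byte MTU gives max_frame = 1500 + 14 (Ethernet
 * header) + 4 (FCS) = 1518 bytes, which becomes the receive buffer
 * length.  (Assumes the usual ENET_HEADER_SIZE/ENET_FCS_LENGTH
 * values of 14 and 4.)
 */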
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;
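	/* In promiscuous or all-multi mode (or when the multicast list
	 * overflows the filter) the hardware's multicast counters appear
	 * to include broadcast frames as well, so broadcasts are read
	 * first and subtracted back out below.  (Reading of the existing
	 * fixup; the underlying register behavior is assumed.)
	 */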
	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);

		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */ +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}
#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	u32 icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_IXGB_NAPI
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		/* Disable interrupts and register for poll. The flush
		 * of the posted write is intentionally left out. */
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
#else
	/* yes, that is actually a & and it is meant to make sure that
	 * every pass through this for loop checks both receive and
	 * transmit queues for completed descriptors, intended to
	 * avoid starvation issues and assist tx/rx fairness. */
	for (i = 0; i < IXGB_MAX_INTR; i++)
		if (!ixgb_clean_rx_irq(adapter) &
		    !ixgb_clean_tx_irq(adapter))
			break;
#endif
	return IRQ_HANDLED;
}
#ifdef CONFIG_IXGB_NAPI
/**
 * ixgb_clean - NAPI Rx polling callback
 * @napi: napi structure embedded in the board private structure
 * @budget: maximum number of receive packets to process in this pass
 **/
static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		ixgb_irq_enable(adapter);
	}

	return work_done;
}
#endif
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) &&
		    !(IXGB_READ_REG(&adapter->hw, STATUS) &
		      IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  next_to_watch.status <%x>\n",
				IXGB_READ_REG(&adapter->hw, TDH),
				IXGB_READ_REG(&adapter->hw, TDT),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/
static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}
/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/
static bool
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb, *next_skb;
		u8 status;

#ifdef CONFIG_IXGB_NAPI
		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
#endif
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data);

		if (++i == rx_ring->count) i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		if ((j = i + 1) == rx_ring->count) j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];
		next_skb = next_buffer->skb;
		prefetch(next_skb);

		cleaned = true;

		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		length = le16_to_cpu(rx_desc->length);

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
			/* All receives must fit into a single buffer */
			IXGB_DBG("Receive packet consumed multiple buffers "
				 "length<%x>\n", length);
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors &
			     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
			      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
#define IXGB_CB_LENGTH 256
		if (length < IXGB_CB_LENGTH) {
			struct sk_buff *new_skb =
				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
		}
		/* end copybreak code */

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special));
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_IXGB_NAPI */
		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special));
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_IXGB_NAPI */
		netdev->last_rx = jiffies;

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	ixgb_alloc_rx_buffers(adapter);

	return cleaned;
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/
static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* leave three descriptors unused */
	while (--cleancount > 2) {
		/* recycle! it's good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
				       + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if (++i == rx_ring->count) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}
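	/* The tail register is left pointing at the last descriptor that
	 * was actually initialized (one behind next_to_use), hence the
	 * back-up by one before the RDT write below; this describes the
	 * driver's existing tail convention rather than a documented
	 * hardware requirement. */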
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs, such
		 * as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}
/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 * @netdev: network interface device structure
 * @grp: indicates whether to enable or disable tagging/stripping
 **/
static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 ctrl, rctl;

	ixgb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl |= IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* enable VLAN receive filtering */
		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl &= ~IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* disable VLAN filtering */
		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl &= ~IXGB_RCTL_VFE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	}

	/* don't enable interrupts unless we are UP */
	if (adapter->netdev->flags & IFF_UP)
		ixgb_irq_enable(adapter);
}
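/**
 * ixgb_vlan_rx_add_vid - add a VLAN id to the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN id to enable
 *
 * Each 32-bit VFTA register covers 32 consecutive VLAN ids, so the
 * register index is vid >> 5 and the bit within it is vid & 0x1F.
 **/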
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
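/**
 * ixgb_vlan_rx_kill_vid - remove a VLAN id from the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN id to disable
 **/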
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	ixgb_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	/* don't enable interrupts unless we are UP */
	if (adapter->netdev->flags & IFF_UP)
		ixgb_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
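/**
 * ixgb_restore_vlan - re-register the VLAN group and replay filter entries
 * @adapter: board private structure
 *
 * Brings the hardware VLAN state back in sync with the software
 * vlgrp bookkeeping, e.g. after the adapter has been reset.
 **/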
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = netdev_priv(dev);

	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgb_io_resume - called when it is OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it is OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			printk(KERN_ERR "ixgb: can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}

/* ixgb_main.c */