/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

#include "pch_gbe.h"
#include "pch_gbe_api.h"

#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;

#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802	/* PCI device ID */
#define PCH_GBE_MAR_ENTRIES 16
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
#define PCH_GBE_DMA_ALIGN 0
#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ)	/* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII | \
	PCH_GBE_CRS_SEL \
	)

/* Ethertype field values */
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
#define PCH_GBE_FRAME_SIZE_2048 2048
#define PCH_GBE_FRAME_SIZE_4096 4096
#define PCH_GBE_FRAME_SIZE_8192 8192

#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)

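/*
 * Worked example for PCH_GBE_DESC_UNUSED (an illustrative sketch, not part
 * of the original source): with count = 8, next_to_use = 6 and
 * next_to_clean = 2, the hardware still owns descriptors 2..5, so
 *
 *	unused = 0 + 2 - 6 - 1 + 8 = 3
 *
 * i.e. slots 6, 7 and 0 may still be filled.  One slot is always kept
 * empty so that next_to_use == next_to_clean unambiguously means the
 * ring is empty rather than full.
 */
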
/* Pause packet value */
#define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF

#define PCH_GBE_ETH_ALEN            6

/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register.  Each bit is documented below:
 *   o RX_DMA_CMPLT = Receive DMA Transfer Complete
 *   o RX_DSC_EMP = Receive Descriptor Empty
 *   o WOL_DET = Wake On LAN Event Detected
 *   o TX_CMPLT = Transmit Complete
 */
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP | \
	PCH_GBE_INT_WOL_DET | \
	PCH_GBE_INT_TX_CMPLT \
	)

static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);

inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw:	Pointer to the HW structure
 * Returns
 *	0:	Successful.
 */
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	u32 adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}

/**
 * pch_gbe_wait_clr_bit - Wait for a bit to clear
 * @reg:	Pointer of register
 * @bit:	Busy bit to wait on
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;
	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}

/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:	    Pointer to the HW structure
 * @addr:   Pointer to the MAC address
 * @index:  MAC address array register
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
{
	u32 mar_low, mar_high, adrmask;

	pr_debug("index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC address at this index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address 1A/1B register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address at this index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw:	Pointer to the HW structure
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address and store it in the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
}

/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:	Pointer to the HW structure
 * @mar_count: Receive address registers
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:	            Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += PCH_GBE_ETH_ALEN;
		} else {
			/* Clear MAC address mask */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
				  &hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}

/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:	Pointer to the HW structure
 * Returns
 *	0:			Successful.
 *	Negative value:		Failed.
 */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	pr_debug("mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		pr_err("Flow control param set incorrectly\n");
		return -EINVAL;
	}
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}

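/*
 * Summary of the four flow-control modes handled above (derived from the
 * switch statement; PCH_GBE_FL_CTRL_EN gates honoring received pause
 * frames, tx_fc_enable gates transmission of our own pause frames):
 *
 *	mode			Rx pause honored	Tx pause sent
 *	PCH_GBE_FC_NONE		no			no
 *	PCH_GBE_FC_RX_PAUSE	yes			no
 *	PCH_GBE_FC_TX_PAUSE	no			yes
 *	PCH_GBE_FC_FULL		yes			yes
 *
 * On a half-duplex link Rx flow control is forced off, since 802.3x
 * pause frames are only defined for full duplex.
 */
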
/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
}

/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation (write or read)
 * @reg:  Access register of PHY
 * @data: Write data
 *
 * Returns: Read data.
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		   dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}

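/*
 * Usage sketch (illustrative only, not part of the original source): the
 * mdio wrappers further below route all PHY access through this helper.
 * A raw BMSR read and a BMCR write would look like
 *
 *	u16 bmsr = pch_gbe_mac_ctrl_miim(hw, hw->phy.addr,
 *					 PCH_GBE_HAL_MIIM_READ, MII_BMSR, 0);
 *	pch_gbe_mac_ctrl_miim(hw, hw->phy.addr,
 *			      PCH_GBE_HAL_MIIM_WRITE, MII_BMCR, BMCR_RESET);
 *
 * Note the timeout path returns 0, which a caller cannot distinguish
 * from a register that genuinely reads as zero.
 */
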
/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:   Pointer to the HW structure
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
		 ioread32(&hw->reg->PAUSE_PKT5));
}

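/*
 * Layout of the pause frame assembled above (a sketch, assuming the
 * PAUSE_PKT registers are emitted on the wire least-significant byte
 * first, which is what the constants decode to as an IEEE 802.3x frame;
 * a0..a5 are hw->mac.addr[0..5]):
 *
 *	PKT1 0x00C28001 -> 01 80 C2 00   dest MAC 01:80:C2:00:00:01 ...
 *	PKT2 0x....0100 -> 00 01 a0 a1   ... plus source MAC bytes 0-1
 *	PKT3            -> a2 a3 a4 a5   source MAC bytes 2-5
 *	PKT4 0x01000888 -> 88 08 00 01   ethertype 0x8808, opcode 0x0001
 *	PKT5 0x0000FFFF -> FF FF 00 00   pause time 0xFFFF quanta
 */
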
/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter:  Board private structure to initialize
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
	int size;

	size = (int)sizeof(struct pch_gbe_tx_ring);
	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;
	size = (int)sizeof(struct pch_gbe_rx_ring);
	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	return 0;
}

/**
 * pch_gbe_init_stats - Initialize statistics
 * @adapter:  Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter:  Board private structure to initialize
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		/* BMSR is latched, so read it twice for the current value */
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
	if (addr == 32)
		return -EAGAIN;
	/* Select the PHY and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}

/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * Returns:
 *	Data read from the PHY register
 */
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}

/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID (not used)
 * @reg:    Access location
 * @data:   Write data
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter:  Board private structure
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}

/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);

	/* Setup the receive address */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);

	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}

/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter:  Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->have_msi) {
		pci_disable_msi(adapter->pdev);
		pr_debug("call pci_disable_msi\n");
	}
}

/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

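/*
 * Note on irq_sem (an explanatory sketch, not in the original source):
 * disable/enable calls nest through the atomic counter, so the interrupt
 * mask is only restored once every disabler has re-enabled.  Assuming
 * the counter starts at 0:
 *
 *	pch_gbe_irq_disable(adapter);	irq_sem 0 -> 1, INT_EN = 0
 *	pch_gbe_irq_disable(adapter);	irq_sem 1 -> 2
 *	pch_gbe_irq_enable(adapter);	irq_sem 2 -> 1, still masked
 *	pch_gbe_irq_enable(adapter);	irq_sem 1 -> 0, INT_EN = mask
 *
 * The interrupt handler below likewise bumps irq_sem before masking the
 * Rx/Tx completion bits for NAPI polling.
 */
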
/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}

/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	if (adapter->rx_csum) {
		tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
		tcpip |= PCH_GBE_RX_TCPIPACC_EN;
	} else {
		tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
		tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	}
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}

/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rctl, rxdma;

	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);

	/* Enables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma |= PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
	/* Enables Receive */
	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
}

/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */
static void pch_gbe_unmap_and_free_tx_resource(
	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */
static void pch_gbe_unmap_and_free_rx_resource(
					struct pch_gbe_adapter *adapter,
					struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				  struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}

static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				   u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	/* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else	/* GMII */
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}

static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
			     u16 duplex)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long mode = 0;

	/* Set the communication mode */
	switch (speed) {
	case SPEED_10:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 10;
		break;
	case SPEED_100:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 100;
		break;
	case SPEED_1000:
		mode = PCH_GBE_MODE_GMII_ETHER;
		break;
	}
	if (duplex == DUPLEX_FULL)
		mode |= PCH_GBE_MODE_FULL_DUPLEX;
	else
		mode |= PCH_GBE_MODE_HALF_DUPLEX;
	iowrite32(mode, &hw->reg->MODE);
}

/**
 * pch_gbe_watchdog - Watchdog process
 * @data:  Board private structure
 */
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct ethtool_cmd cmd;

	pr_debug("right now = %ld\n", jiffies);
	pch_gbe_update_stats(adapter);

	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = cmd.speed;
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   cmd.speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
 */
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			     struct pch_gbe_tx_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;
	unsigned long flags;

	/*-- Set frame control --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (unlikely(!adapter->tx_csum))
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing */
	/*
	 * The hardware accelerator cannot compute a checksum when the
	 * frame is shorter than 64 bytes, so do it in software here.
	 */
	if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}
	spin_lock_irqsave(&tx_ring->tx_lock, flags);
	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		pr_err("TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = (tmp_skb->len + 3);
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);
	dev_kfree_skb_any(skb);
}

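/*
 * DMA buffer layout produced by pch_gbe_tx_queue() above (illustrative;
 * the offsets follow directly from the memcpy calls):
 *
 *	offset 0              14   16
 *	       [Ethernet hdr ][pad][IP header / payload ...]
 *
 * The two zero bytes after the 14-byte Ethernet header keep the IP
 * header 4-byte aligned in the DMA buffer; the MAC is expected to strip
 * the pad when putting the frame on the wire.  tx_words_eob is simply
 * the buffer length plus 3, the end-of-buffer count the DMA engine
 * appears to require.
 */
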
/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 */
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* Update device status "adapter->stats" */
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	/* Update network device status "adapter->net_stats" */
	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;
	/* Fill out the OS statistics structure */
	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;
	/* Rx Errors */
	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
	/* Tx Errors */
	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to a network interface device structure
 * Returns
 *	- IRQ_HANDLED:	Our interrupt
 *	- IRQ_NONE:	Not our interrupt
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When the status shows no enabled interrupt cause */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		adapter->stats.intr_rx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_err("Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
		if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
		    == 0) {
			return IRQ_HANDLED;
		}
	}
	/* When the status indicates Rx or Tx completion */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}

/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
		} else {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (unlikely(!skb)) {
				/* Better luck next round */
				adapter->stats.rx_alloc_buff_failed++;
				break;
			}
			/* 64-byte align */
			skb_reserve(skb, PCH_GBE_DMA_ALIGN);
			buffer_info->skb = skb;
			buffer_info->length = adapter->rx_buffer_len;
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		buffer_info->mapped = true;
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;

		pr_debug("i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
			 i, (unsigned long long)buffer_info->dma,
			 buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
}

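/*
 * Note on the final RX_DSC_SW_P write (explanatory, derived from the
 * arithmetic above): next_to_use is advanced to the first slot that was
 * NOT refilled, so the software pointer handed to the hardware is backed
 * up by one descriptor.  For example, with count = 64 and i advanced to
 * 10, the register gets the address of descriptor 9, the last one that
 * actually owns a fresh buffer.
 */
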
/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring
 */
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
				     struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;
	struct pch_gbe_tx_desc *tx_desc;

	bufsz =
	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		skb = netdev_alloc_skb(adapter->netdev, bufsz);
		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
		buffer_info->skb = skb;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
		tx_desc->gbec_status = (DSC_INIT16);
	}
}

/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;

	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
		 tx_desc->gbec_status, tx_desc->dma_status);

	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
		cleaned = true;
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;

		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    PCH_GBE_TXD_GMAC_STAT_CRSER)) {
			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status &
			    PCH_GBE_TXD_GMAC_STAT_EXCOL)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			pr_debug("Transfer Collision\n");
		} else if ((tx_desc->gbec_status &
			    PCH_GBE_TXD_GMAC_STAT_CMPLT)) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			pr_debug("trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
			break;
	}
	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		 cleaned_count);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;
		pr_debug("Tx wake queue\n");
	}
	spin_lock(&adapter->tx_queue_lock);
	tx_ring->next_to_clean = i;
	spin_unlock(&adapter->tx_queue_lock);
	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
	return cleaned;
}

  1215. /**
  1216. * pch_gbe_clean_rx - Send received data up the network stack; legacy
  1217. * @adapter: Board private structure
  1218. * @rx_ring: Rx descriptor ring
  1219. * @work_done: Completed count
  1220. * @work_to_do: Request count
  1221. * Returns
  1222. * true: Cleaned the descriptor
  1223. * false: Not cleaned the descriptor
  1224. */
  1225. static bool
  1226. pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
  1227. struct pch_gbe_rx_ring *rx_ring,
  1228. int *work_done, int work_to_do)
  1229. {
  1230. struct net_device *netdev = adapter->netdev;
  1231. struct pci_dev *pdev = adapter->pdev;
  1232. struct pch_gbe_buffer *buffer_info;
  1233. struct pch_gbe_rx_desc *rx_desc;
  1234. u32 length;
  1235. unsigned int i;
  1236. unsigned int cleaned_count = 0;
  1237. bool cleaned = false;
  1238. struct sk_buff *skb, *new_skb;
  1239. u8 dma_status;
  1240. u16 gbec_status;
  1241. u32 tcp_ip_status;
  1242. i = rx_ring->next_to_clean;
  1243. while (*work_done < work_to_do) {
  1244. /* Check Rx descriptor status */
  1245. rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
  1246. if (rx_desc->gbec_status == DSC_INIT16)
  1247. break;
  1248. cleaned = true;
  1249. cleaned_count++;
  1250. dma_status = rx_desc->dma_status;
  1251. gbec_status = rx_desc->gbec_status;
  1252. tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
		/* Prefetch the packet */
		prefetch(skb->data);

		pr_debug("RxDescNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x] BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* Get the receive length (hardware count minus 3) */
			length = (rx_desc->rx_words_eob) - 3;

			/* Decide the data conversion method */
			if (!adapter->rx_csum) {
				/* [Header:14][payload] */
				if (NET_IP_ALIGN) {
					/* Because alignment differs,
					 * the new_skb is newly allocated,
					 * and data is copied to new_skb. */
					new_skb = netdev_alloc_skb(netdev,
							length + NET_IP_ALIGN);
					if (!new_skb) {
						/* drop error */
						pr_err("New skb allocation Error\n");
						goto drop;
					}
					skb_reserve(new_skb, NET_IP_ALIGN);
					memcpy(new_skb->data, skb->data,
					       length);
					skb = new_skb;
				} else {
					/* DMA buffer is used as SKB as it is. */
					buffer_info->skb = NULL;
				}
			} else {
				/* [Header:14][padding:2][payload] */
				/* The length includes padding length */
				length = length - PCH_GBE_DMA_PADDING;
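
				/* Frames shorter than the copybreak module
				 * parameter are copied into a fresh skb so
				 * the large DMA buffer can be recycled; the
				 * copy is also forced when the required
				 * alignment cannot be achieved in place. */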
				if ((length < copybreak) ||
				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
					/* Because alignment differs,
					 * the new_skb is newly allocated,
					 * and data is copied to new_skb.
					 * Padding data is deleted
					 * at the time of a copy. */
					new_skb = netdev_alloc_skb(netdev,
							length + NET_IP_ALIGN);
					if (!new_skb) {
						/* drop error */
						pr_err("New skb allocation Error\n");
						goto drop;
					}
					skb_reserve(new_skb, NET_IP_ALIGN);
					memcpy(new_skb->data, skb->data,
					       ETH_HLEN);
					memcpy(&new_skb->data[ETH_HLEN],
					       &skb->data[ETH_HLEN +
							  PCH_GBE_DMA_PADDING],
					       length - ETH_HLEN);
					skb = new_skb;
				} else {
					/* Padding data is deleted
					 * by moving header data. */
					memmove(&skb->data[PCH_GBE_DMA_PADDING],
						&skb->data[0], ETH_HLEN);
					skb_reserve(skb, NET_IP_ALIGN);
					buffer_info->skb = NULL;
				}
			}
			/* The length includes FCS length */
			length = length - ETH_FCS_LEN;

			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;

			/* Write metadata of skb */
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);
			if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
			    PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
		}
drop:
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}

/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_tx_desc *tx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info) {
		pr_err("Unable to allocate memory for the buffer information\n");
		return -ENOMEM;
	}

	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		vfree(tx_ring->buffer_info);
		pr_err("Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&tx_ring->tx_lock);
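
	/* Stamp every descriptor with the initial status value; DSC_INIT16
	 * is the same marker the cleanup path writes back once a slot has
	 * been processed. */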
	for (desNo = 0; desNo < tx_ring->count; desNo++) {
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
		tx_desc->gbec_status = DSC_INIT16;
	}
	pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		 tx_ring->desc, (unsigned long long)tx_ring->dma,
		 tx_ring->next_to_clean, tx_ring->next_to_use);
	return 0;
}

/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter: Board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_desc *rx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info) {
		pr_err("Unable to allocate memory for the buffer information\n");
		return -ENOMEM;
	}

	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		pr_err("Unable to allocate memory for the receive descriptor ring\n");
		vfree(rx_ring->buffer_info);
		return -ENOMEM;
	}
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	for (desNo = 0; desNo < rx_ring->count; desNo++) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
		rx_desc->gbec_status = DSC_INIT16;
	}
	pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		 rx_ring->desc, (unsigned long long)rx_ring->dma,
		 rx_ring->next_to_clean, rx_ring->next_to_use);
	return 0;
}

/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 */
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter: Board private structure
 * @rx_ring: Ring to clean the resources from
 */
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter: Board private structure
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int flags;
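
	/* Prefer MSI; if it cannot be enabled, fall back to the legacy
	 * (possibly shared) interrupt line. */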
	flags = IRQF_SHARED;
	adapter->have_msi = false;
	err = pci_enable_msi(adapter->pdev);
	pr_debug("call pci_enable_msi\n");
	if (err) {
		pr_debug("call pci_enable_msi - Error: %d\n", err);
	} else {
		flags = 0;
		adapter->have_msi = true;
	}
	err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
			  flags, netdev->name, netdev);
	if (err)
		pr_err("Unable to allocate interrupt Error: %d\n", err);
	pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
		 adapter->have_msi, flags, err);
	return err;
}

static void pch_gbe_set_multi(struct net_device *netdev);

/**
 * pch_gbe_up - Up GbE network device
 * @adapter: Board private structure
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err;

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up\n");
		return err;
	}
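
	/* Populate both rings before interrupts, NAPI, and the Tx queue are
	 * enabled, so the hardware has buffers to receive into from the
	 * first interrupt onward. */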
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	adapter->tx_queue_len = netdev->tx_queue_len;

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;
}

/**
 * pch_gbe_down - Down GbE network device
 * @adapter: Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
}

/**
 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 * @adapter: Board private structure to initialize
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->tx_queue_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}

/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev: Network interface device structure
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}

/**
 * pch_gbe_stop - Disables a network interface
 * @netdev: Network interface device structure
 * Returns
 *	0: Successfully
 */
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}

/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:    Socket buffer structure
 * @netdev: Network interface device structure
 * Returns
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
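
	/* Only try the Tx lock; if the cleanup path already holds it, tell
	 * the stack to requeue the packet instead of spinning here. */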
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY next_to_use : 0x%08x next_to_clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}

/**
 * pch_gbe_get_stats - Get System Network Statistics
 * @netdev: Network interface device structure
 * Returns:  The current stats
 */
static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: Network interface device structure
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);
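
	/* With more multicast addresses than MAR entries, the multicast
	 * filter was disabled above, so there is no address list to program
	 * into the hardware. */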
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}

/**
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: Network interface device structure
 * @addr:   Pointer to an address structure
 * Returns
 *	0:		Successfully
 *	-EADDRNOTAVAIL:	Failed
 */
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *skaddr = addr;
	int ret_val;

	if (!is_valid_ether_addr(skaddr->sa_data)) {
		ret_val = -EADDRNOTAVAIL;
	} else {
		memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
		ret_val = 0;
	}
	pr_debug("ret_val : 0x%08x\n", ret_val);
	pr_debug("dev_addr : %pM\n", netdev->dev_addr);
	pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
	pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
		 ioread32(&adapter->hw.reg->mac_adr[0].high),
		 ioread32(&adapter->hw.reg->mac_adr[0].low));
	return ret_val;
}

/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:  Network interface device structure
 * @new_mtu: New value for maximum frame size
 * Returns
 *	0:	Successfully
 *	-EINVAL: Failed
 */
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame;

	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
		pr_err("Invalid MTU setting\n");
		return -EINVAL;
	}
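
	/* Round the Rx buffer length up to the next supported bucket so a
	 * full frame at the new MTU always fits in a single buffer. */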
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
	netdev->mtu = new_mtu;
	adapter->hw.mac.max_frame_size = max_frame;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		 adapter->hw.mac.max_frame_size);
	return 0;
}

/**
 * pch_gbe_ioctl - Control registers through the MII interface
 * @netdev: Network interface device structure
 * @ifr:    Pointer to ifr structure
 * @cmd:    Control command
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	pr_debug("cmd : 0x%04x\n", cmd);
	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}

/**
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 * @netdev: Network interface device structure
 */
static void pch_gbe_tx_timeout(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi:   Pointer of polling device struct
 * @budget: The maximum number of packets to process
 * Returns
 *	The amount of work done
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
		container_of(napi, struct pch_gbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev)) {
		poll_end_flag = true;
	} else {
		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
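
		/* If Tx work was cleaned, report the full budget as
		 * consumed so the NAPI core keeps polling instead of
		 * re-enabling interrupts too early. */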
		if (cleaned)
			work_done = budget;
		/* If no Tx and not enough Rx work done,
		 * exit the polling mode
		 */
		if ((work_done < budget) || !netif_running(netdev))
			poll_end_flag = true;
	}

	if (poll_end_flag) {
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev: Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	pch_gbe_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_get_stats = pch_gbe_get_stats,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_multicast_list = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};

static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}

static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			pr_debug("can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}

static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;
	int retval = 0;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
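
	/* If any wake-up events are configured, keep the receiver set up so
	 * Wake-on-LAN frames can be recognized; otherwise power the PHY
	 * down completely. */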
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_hal_power_down_phy(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return retval;
}

#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake on lan control and status */
	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
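
	/* On a real power-off, arm PCI wake so a Wake-on-LAN packet can
	 * bring the system back from D3hot. */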
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
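
	/* Prefer a 64-bit DMA mask; if either the streaming or the coherent
	 * mask cannot be satisfied, fall back to 32-bit addressing. */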
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (ret) {
				dev_err(&pdev->dev, "ERR: No usable DMA "
					"configuration, aborting\n");
				goto err_disable_device;
			}
		}
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		goto err_disable_device;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
	if (!netdev) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"ERR: Can't allocate and set up an Ethernet device\n");
		goto err_release_pci;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
	if (!adapter->hw.reg) {
		ret = -EIO;
		dev_err(&pdev->dev, "Can't ioremap\n");
		goto err_free_netdev;
	}

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
	pch_gbe_set_ethtool_ops(netdev);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

	/* setup the private structure */
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_iounmap;

	/* Initialize PHY */
	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}
	pch_gbe_hal_get_bus_info(&adapter->hw);

	/* Read the MAC address and store it in the private data */
	ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		ret = -EIO;
		goto err_free_adapter;
	}

	setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

	if (adapter->tx_csum)
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	/* initialize the wol settings based on the eeprom settings */
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;
	/* tell the stack to leave us alone until pch_gbe_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");

	device_set_wakeup_enable(&pdev->dev, 1);

	return 0;

err_free_adapter:
	pch_gbe_hal_phy_hw_reset(&adapter->hw);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_iounmap:
	iounmap(adapter->hw.reg);
err_free_netdev:
	free_netdev(netdev);
err_release_pci:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}

static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};

#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_pcidev = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};

static int __init pch_gbe_init_module(void)
{
	int ret;

	ret = pci_register_driver(&pch_gbe_pcidev);
	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_pcidev);
}

module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* pch_gbe_main.c */