pch_gbe_main.c

/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
#ifdef CONFIG_PCH_PTP
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#endif

#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802		/* Pci device ID */
#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT	256
#define PCH_GBE_PCI_BAR			1
#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */

/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM		0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013

/* Macros for ML7831 */
#define PCI_DEVICE_ID_ROHM_ML7831_GBE	0x8802

#define PCH_GBE_TX_WEIGHT		64
#define PCH_GBE_RX_WEIGHT		64
#define PCH_GBE_RX_BUFFER_WRITE		16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING		(PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII \
	)

/* Ethertype field values */
#define PCH_GBE_MAX_RX_BUFFER_SIZE	0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE	10318
#define PCH_GBE_FRAME_SIZE_2048		2048
#define PCH_GBE_FRAME_SIZE_4096		4096
#define PCH_GBE_FRAME_SIZE_8192		8192

#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
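
/* Worked example (for illustration): with count = 8, next_to_clean = 2 and
 * next_to_use = 5, PCH_GBE_DESC_UNUSED yields 8 + 2 - 5 - 1 = 4 free slots.
 * One descriptor is always left unused so that a completely full ring can
 * be told apart from an empty one.
 */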

/* Pause packet value */
#define PCH_GBE_PAUSE_PKT1_VALUE	0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE	0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE	0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE	0x0000FFFF
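
/* Note (explanatory): read little-endian, these words spell out an IEEE
 * 802.3x pause frame.  PKT1 and the low half of PKT2 give the reserved
 * multicast destination 01:80:C2:00:00:01, PKT4 is the MAC control
 * ethertype 0x8808 followed by the pause opcode 0x0001, and PKT5 is the
 * maximum pause time 0xFFFF.  PAUSE_PKT2/PKT3 are completed with the local
 * MAC address at run time in pch_gbe_mac_set_pause_packet() below.
 */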

#define PCH_GBE_ETH_ALEN	6

/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register.  Each bit is documented below:
 *   o RXT0   = Receiver Timer Interrupt (ring 0)
 *   o TXDW   = Transmit Descriptor Written Back
 *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
 *   o RXSEQ  = Receive Sequence Error
 *   o LSC    = Link Status Change
 */
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP | \
	PCH_GBE_INT_RX_FIFO_ERR | \
	PCH_GBE_INT_WOL_DET | \
	PCH_GBE_INT_TX_CMPLT \
	)

#define PCH_GBE_INT_DISABLE_ALL	0

#ifdef CONFIG_PCH_PTP
/* Macros for ieee1588 */
#define TICKS_NS_SHIFT	5
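
/* Shifting a raw time stamp left by TICKS_NS_SHIFT multiplies it by 32,
 * converting hardware ticks to nanoseconds (assuming a 31.25 MHz time stamp
 * clock, i.e. 32 ns per tick).
 */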

/* 0x40 Time Synchronization Channel Control Register Bits */
#define MASTER_MODE	(1<<0)
#define SLAVE_MODE	(0<<0)
#define V2_MODE		(1<<31)
#define CAP_MODE0	(0<<16)
#define CAP_MODE2	(1<<17)

/* 0x44 Time Synchronization Channel Event Register Bits */
#define TX_SNAPSHOT_LOCKED	(1<<0)
#define RX_SNAPSHOT_LOCKED	(1<<1)
#endif

static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);

#ifdef CONFIG_PCH_PTP
static struct sock_filter ptp_filter[] = {
	PTP_FILTER
};

static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
	    (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
		return 0;
	}
	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == *hi &&
		uid_lo == lo &&
		seqid == *id);
}

static void pch_rx_timestamp(
			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}

static void pch_tx_timestamp(
			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}

static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		adapter->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		adapter->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#endif
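
/* Userspace usage sketch (illustrative, not part of this driver): the
 * handler above is reached through the standard SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);   // interface name is an example
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);       // sock_fd: any open socket
 */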

inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw:	Pointer to the HW structure
 * Returns
 *	0:	Successful.
 */
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	u32 adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}
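
/* Worked example: adr1a = 0x44332211 and adr1b = 0x00006655 unpack to the
 * MAC address 11:22:33:44:55:66, i.e. the registers hold the address in
 * little-endian byte order.
 */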

/**
 * pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg:	Pointer of register
 * @bit:	Busy bit
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;

	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}

/**
 * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
 * @reg:	Pointer of register
 * @bit:	Busy bit
 */
static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
{
	u32 tmp;
	int ret = -1;

	/* wait busy */
	tmp = 20;
	while ((ioread32(reg) & bit) && --tmp)
		udelay(5);
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
	else
		ret = 0;
	return ret;
}

/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:	    Pointer to the HW structure
 * @addr:   Pointer to the MAC address
 * @index:  MAC address array register
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
{
	u32 mar_low, mar_high, adrmask;

	pr_debug("index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC Address of index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address 1A/1B register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw:	Pointer to the HW structure
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address and store it in the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}

static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
{
	/* Read the MAC address and store it in the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
	/* Setup the MAC address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}

/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:	Pointer to the HW structure
 * @mar_count: Receive address registers
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:	            Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += PCH_GBE_ETH_ALEN;
		} else {
			/* Clear MAC address mask */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
				  &hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}

/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:	Pointer to the HW structure
 * Returns
 *	0:		Successful.
 *	Negative value:	Failed.
 */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	pr_debug("mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		pr_err("Flow control param set incorrectly\n");
		return -EINVAL;
	}
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}

/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}

/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation (Write or Read)
 * @reg:  Access register of PHY
 * @data: Write data
 *
 * Returns: Read data.
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		  dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}
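
/* pch_gbe_mdio_read() and pch_gbe_mdio_write() below are thin wrappers
 * around this helper, passing PCH_GBE_HAL_MIIM_READ or PCH_GBE_HAL_MIIM_WRITE
 * as @dir so the generic mii library can drive the PHY through it.
 */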

/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:	Pointer to the HW structure
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
		 ioread32(&hw->reg->PAUSE_PKT5));

	return;
}

/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter:  Board private structure to initialize
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
	adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;
	adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	return 0;
}

/**
 * pch_gbe_init_stats - Initialize statistics
 * @adapter:  Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
	return;
}

/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter:  Board private structure to initialize
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
	if (addr == 32)
		return -EAGAIN;
	/* Select the phy and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}

/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * Returns: Data read from the PHY register
 */
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}

/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * @data:   Write data
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter:  Board private structure
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}

/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}

/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter:  Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->have_msi) {
		pci_disable_msi(adapter->pdev);
		pr_debug("call pci_disable_msi\n");
	}
}

/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}

/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}

/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rctl, rxdma;

	pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}

/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */
static void pch_gbe_unmap_and_free_tx_resource(
	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */
static void pch_gbe_unmap_and_free_rx_resource(
					struct pch_gbe_adapter *adapter,
					struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				  struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}

static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				   u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	/* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else	/* GMII */
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}

static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
			     u16 duplex)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long mode = 0;

	/* Set the communication mode */
	switch (speed) {
	case SPEED_10:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 10;
		break;
	case SPEED_100:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 100;
		break;
	case SPEED_1000:
		mode = PCH_GBE_MODE_GMII_ETHER;
		break;
	}
	if (duplex == DUPLEX_FULL)
		mode |= PCH_GBE_MODE_FULL_DUPLEX;
	else
		mode |= PCH_GBE_MODE_HALF_DUPLEX;
	iowrite32(mode, &hw->reg->MODE);
}

/**
 * pch_gbe_watchdog - Watchdog process
 * @data:  Board private structure
 */
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	pr_debug("right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
 */
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			     struct pch_gbe_tx_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;

	/*-- Set frame control --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing */
	/*
	 * The hardware checksum accelerator cannot handle frames shorter
	 * than 64 bytes, so compute the checksum in software for those.
	 */
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}
	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		pr_err("TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

#ifdef CONFIG_PCH_PTP
	pch_tx_timestamp(adapter, skb);
#endif

	dev_kfree_skb_any(skb);
}

/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 */
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* Update device status "adapter->stats" */
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	/* Update network device status "adapter->net_stats" */
	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;
	/* Fill out the OS statistics structure */
	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;
	/* Rx Errors */
	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
	/* Tx Errors */
	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rxdma;
	u16 value;
	int ret;

	/* Disable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
	/* Wait until the Rx DMA bus is idle */
	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
	if (ret) {
		/* Disable Bus master */
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
		value &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
		/* Stop Receive */
		pch_gbe_mac_reset_rx(hw);
		/* Enable Bus master */
		value |= PCI_COMMAND_MASTER;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
	} else {
		/* Stop Receive */
		pch_gbe_mac_reset_rx(hw);
	}
}
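
/* Note (explanatory): if the Rx DMA engine never reports idle above, bus
 * mastering is turned off around the Rx reset, presumably so that a stuck
 * DMA transaction cannot keep writing to memory while the receiver is
 * being reset.
 */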
  1175. static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
  1176. {
  1177. u32 rxdma;
  1178. /* Enables Receive DMA */
  1179. rxdma = ioread32(&hw->reg->DMA_CTRL);
  1180. rxdma |= PCH_GBE_RX_DMA_EN;
  1181. iowrite32(rxdma, &hw->reg->DMA_CTRL);
  1182. /* Enables Receive */
  1183. iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
  1184. return;
  1185. }
/**
 * pch_gbe_intr - Interrupt handler
 * @irq:  Interrupt number
 * @data: Pointer to a network interface device structure
 * Returns
 *	- IRQ_HANDLED: Our interrupt
 *	- IRQ_NONE:    Not our interrupt
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check the interrupt status against the enabled sources */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* No enabled interrupt source is pending: not our interrupt */
	if (unlikely(!int_st))
		return IRQ_NONE;
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			pr_debug("Rx fifo overrun\n");
			adapter->rx_stop_flag = true;
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_stop_receive(adapter);
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* The Rx descriptor ring has run empty */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_debug("Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Send a pause frame to throttle the link partner */
			pch_gbe_mac_set_pause_packet(hw);
		}
	}
	/* Rx or Tx completion: hand the work to NAPI */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Leave only the Rx-descriptor-empty source enabled */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}
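/*
 * This handler follows the usual NAPI interrupt-mitigation pattern:
 * completion interrupts are masked in INT_EN before __napi_schedule(), and
 * pch_gbe_napi_poll() re-enables them via pch_gbe_irq_enable() only after
 * the rings have been cleaned, so sustained traffic is handled by polling
 * rather than by a storm of per-packet interrupts.
 */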
/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
	i = rx_ring->next_to_use;

	while (cleaned_count--) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}
		/* align */
		skb_reserve(skb, NET_IP_ALIGN);
		buffer_info->skb = skb;

		buffer_info->dma = dma_map_single(&pdev->dev,
						  buffer_info->rx_buffer,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		buffer_info->mapped = true;
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;
		pr_debug("i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
			 i, (unsigned long long)buffer_info->dma,
			 buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
}
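/*
 * The final write to RX_DSC_SW_P points the hardware at the *last*
 * descriptor that software refilled, hence the post-decrement with
 * wrap-around: if next_to_use advanced to 0, the last valid descriptor is
 * count - 1. For example, with count = 64 and i = 0 after the loop, the
 * software pointer register is loaded with the address of descriptor 63.
 */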
static int
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz;
	unsigned int size;

	bufsz = adapter->rx_buffer_len;

	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
						   &rx_ring->rx_buff_pool_logic,
						   GFP_KERNEL);
	if (!rx_ring->rx_buff_pool) {
		pr_err("Unable to allocate memory for the receive pool buffer\n");
		return -ENOMEM;
	}
	memset(rx_ring->rx_buff_pool, 0, size);
	rx_ring->rx_buff_pool_size = size;
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
		buffer_info->length = bufsz;
	}
	return 0;
}
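/*
 * All Rx data buffers are slices of one coherent pool: descriptor i owns
 * rx_buff_pool + bufsz * i. The PCH_GBE_RESERVE_MEMORY tail presumably
 * gives the controller headroom past the last slice. Note that each slice
 * is still mapped with dma_map_single() in pch_gbe_alloc_rx_buffers()
 * before it is handed to the hardware.
 */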
/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring
 */
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
				     struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;
	struct pch_gbe_tx_desc *tx_desc;

	bufsz =
	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		skb = netdev_alloc_skb(adapter->netdev, bufsz);
		if (unlikely(!skb))
			break;	/* don't dereference a failed allocation */
		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
		buffer_info->skb = skb;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
		tx_desc->gbec_status = (DSC_INIT16);
	}
}
/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring
 * Returns
 *	true:  cleaned every completed descriptor
 *	false: stopped early because the cleanup budget was exhausted
 */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = true;

	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
		 tx_desc->gbec_status, tx_desc->dma_status);

	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;

		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)) {
			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			pr_debug("Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			pr_debug("trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
			cleaned = false;
			break;
		}
	}
	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		 cleaned_count);

	/* Recover from running out of Tx resources in xmit_frame */
	spin_lock(&tx_ring->tx_lock);
	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;
		pr_debug("Tx wake queue\n");
	}

	tx_ring->next_to_clean = i;
	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
	spin_unlock(&tx_ring->tx_lock);
	return cleaned;
}
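/*
 * PCH_GBE_TX_WEIGHT caps how many descriptors a single pch_gbe_clean_tx()
 * call may reclaim. Returning false when the cap is hit makes
 * pch_gbe_napi_poll() report the full budget as consumed, so NAPI keeps
 * polling instead of re-enabling interrupts while completed descriptors
 * are still outstanding.
 */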
/**
 * pch_gbe_clean_rx - Send received data up the network stack; legacy
 * @adapter:    Board private structure
 * @rx_ring:    Rx descriptor ring
 * @work_done:  Completed count
 * @work_to_do: Request count
 * Returns
 *	true:  cleaned at least one descriptor
 *	false: no descriptor was ready to clean
 */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x]  BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* get receive length: the hardware count includes a
			 * 3-byte offset and the FCS */
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;
			/*
			 * buffer_info->rx_buffer: [Header:14][payload]
			 * skb->data:              [Reserve:2][Header:14][payload]
			 */
			memcpy(skb->data, buffer_info->rx_buffer, length);

			/* update statistics */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write the metadata of the skb */
			skb_put(skb, length);

#ifdef CONFIG_PCH_PTP
			pch_rx_timestamp(adapter, skb);
#endif

			skb->protocol = eth_type_trans(skb, netdev);
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_NONE;
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d  length: %d\n",
				 skb->ip_summed, length);
		}
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}
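/*
 * This Rx path always copies: each frame is memcpy()'d out of its coherent
 * pool slice into a freshly allocated skb before napi_gro_receive(). The
 * pool buffer itself is never handed to the stack, which is why
 * pch_gbe_alloc_rx_buffers() can immediately remap the same slice for the
 * next frame.
 */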
/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_tx_desc *tx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		return -ENOMEM;

	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		vfree(tx_ring->buffer_info);
		pr_err("Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&tx_ring->tx_lock);

	for (desNo = 0; desNo < tx_ring->count; desNo++) {
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
		tx_desc->gbec_status = DSC_INIT16;
	}
	pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		 tx_ring->desc, (unsigned long long)tx_ring->dma,
		 tx_ring->next_to_clean, tx_ring->next_to_use);
	return 0;
}
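/*
 * DSC_INIT16 doubles as an ownership marker throughout this driver: a
 * descriptor whose gbec_status still reads DSC_INIT16 belongs to software,
 * and the cleanup loops in pch_gbe_clean_tx()/pch_gbe_clean_rx() stop as
 * soon as they see it. The hardware overwrites the field with real status
 * bits when it completes a descriptor.
 */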
/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter: Board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_desc *rx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		return -ENOMEM;

	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		pr_err("Unable to allocate memory for the receive descriptor ring\n");
		vfree(rx_ring->buffer_info);
		return -ENOMEM;
	}
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	for (desNo = 0; desNo < rx_ring->count; desNo++) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
		rx_desc->gbec_status = DSC_INIT16;
	}
	pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		 rx_ring->desc, (unsigned long long)rx_ring->dma,
		 rx_ring->next_to_clean, rx_ring->next_to_use);
	return 0;
}
/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 */
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter: Board private structure
 * @rx_ring: Ring to clean the resources from
 */
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter: Board private structure
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int flags;

	flags = IRQF_SHARED;
	adapter->have_msi = false;
	err = pci_enable_msi(adapter->pdev);
	pr_debug("call pci_enable_msi\n");
	if (err) {
		pr_debug("call pci_enable_msi - Error: %d\n", err);
	} else {
		flags = 0;
		adapter->have_msi = true;
	}
	err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
			  flags, netdev->name, netdev);
	if (err)
		pr_err("Unable to allocate interrupt Error: %d\n", err);
	pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
		 adapter->have_msi, flags, err);
	return err;
}

static void pch_gbe_set_multi(struct net_device *netdev);
/**
 * pch_gbe_up - Up GbE network device
 * @adapter: Board private structure
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err;

	/* Ensure we have a valid MAC */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		pr_err("Error: Invalid MAC address\n");
		return -EINVAL;
	}

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up\n");
		return err;
	}
	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
	if (err) {
		pr_err("Error: can't bring device up\n");
		pch_gbe_free_irq(adapter);	/* don't leak the IRQ on this error path */
		return err;
	}
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_start_receive(&adapter->hw);

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;
}
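/*
 * Bring-up ordering in pch_gbe_up() matters: the MAC filters and DMA
 * engines are programmed before the IRQ is requested, receive buffers are
 * posted before the receiver is started, and NAPI is enabled before
 * interrupts are unmasked, so the first interrupt always finds a pollable
 * context.
 */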
/**
 * pch_gbe_down - Down GbE network device
 * @adapter: Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}
/**
 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 * @adapter: Board private structure to initialize
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}
/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev: Network interface device structure
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}

/**
 * pch_gbe_stop - Disables a network interface
 * @netdev: Network interface device structure
 * Returns
 *	0: Successfully
 */
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}
/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:    Socket buffer structure
 * @netdev: Network interface device structure
 * Returns
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY  next_to_use : 0x%08x  next_to_clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
	return NETDEV_TX_OK;
}
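/*
 * Two distinct "busy" cases above: failing the trylock returns
 * NETDEV_TX_LOCKED so the core requeues and retries immediately, while a
 * full ring stops the queue and returns NETDEV_TX_BUSY; pch_gbe_clean_tx()
 * wakes the queue again once descriptors have been reclaimed.
 */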
/**
 * pch_gbe_get_stats - Get System Network Statistics
 * @netdev: Network interface device structure
 * Returns: The current stats
 */
static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: Network interface device structure
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
/**
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: Network interface device structure
 * @addr:   Pointer to an address structure
 * Returns
 *	0:              Successfully
 *	-EADDRNOTAVAIL: Failed
 */
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *skaddr = addr;
	int ret_val;

	if (!is_valid_ether_addr(skaddr->sa_data)) {
		ret_val = -EADDRNOTAVAIL;
	} else {
		memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
		ret_val = 0;
	}
	pr_debug("ret_val : 0x%08x\n", ret_val);
	pr_debug("dev_addr : %pM\n", netdev->dev_addr);
	pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
	pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
		 ioread32(&adapter->hw.reg->mac_adr[0].high),
		 ioread32(&adapter->hw.reg->mac_adr[0].low));
	return ret_val;
}
/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:  Network interface device structure
 * @new_mtu: New value for maximum frame size
 * Returns
 *	0:       Successfully
 *	-EINVAL: Failed
 */
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame;
	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
	int err;

	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
		pr_err("Invalid MTU setting\n");
		return -EINVAL;
	}
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

	if (netif_running(netdev)) {
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return -ENOMEM;
		} else {
			netdev->mtu = new_mtu;
			adapter->hw.mac.max_frame_size = max_frame;
		}
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		 adapter->hw.mac.max_frame_size);
	return 0;
}
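/*
 * Receive buffers are sized in buckets so a single pool slice always holds
 * a full frame. For example, the default 1500-byte MTU gives
 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, which selects
 * the 2048-byte bucket, while a 4000-byte MTU (max_frame = 4018) selects
 * the 4096-byte bucket.
 */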
/**
 * pch_gbe_set_features - Reset device after features changed
 * @netdev:   Network interface device structure
 * @features: New features
 * Returns
 *	0: HW state updated successfully
 */
static int pch_gbe_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	return 0;
}

/**
 * pch_gbe_ioctl - Controls register through a MII interface
 * @netdev: Network interface device structure
 * @ifr:    Pointer to ifr structure
 * @cmd:    Control command
 * Returns
 *	0:              Successfully
 *	Negative value: Failed
 */
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	pr_debug("cmd : 0x%04x\n", cmd);

#ifdef CONFIG_PCH_PTP
	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);
#endif

	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}

/**
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 * @netdev: Network interface device structure
 */
static void pch_gbe_tx_timeout(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi:   Pointer of polling device struct
 * @budget: Maximum number of packets to process in one call
 * Returns the number of packets processed. Returning less than @budget
 * ends the polling mode; returning @budget keeps the poll scheduled.
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;
	u32 int_en;

	pr_debug("budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	if (!cleaned)
		work_done = budget;
	/* If no Tx and not enough Rx work done,
	 * exit the polling mode
	 */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete(napi);
		if (adapter->rx_stop_flag) {
			adapter->rx_stop_flag = false;
			pch_gbe_start_receive(&adapter->hw);
		}
		pch_gbe_irq_enable(adapter);
	} else if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_start_receive(&adapter->hw);
		int_en = ioread32(&adapter->hw.reg->INT_EN);
		iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
			  &adapter->hw.reg->INT_EN);
	}

	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
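/*
 * Both branches above may restart the receiver after an Rx FIFO overrun;
 * the difference is interrupt state. When polling ends,
 * pch_gbe_irq_enable() restores the full mask, whereas the still-polling
 * branch re-enables only PCH_GBE_INT_RX_FIFO_ERR, which the interrupt
 * handler masked when the overrun was detected.
 */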
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev: Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	pch_gbe_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_get_stats = pch_gbe_get_stats,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}

static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			pr_debug("can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}

static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;
	int retval = 0;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_hal_power_down_phy(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return retval;
}
#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;	/* signed, so a negative error code survives the return */

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake on lan control and status */
	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */
static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (ret) {
				dev_err(&pdev->dev, "ERR: No usable DMA "
					"configuration, aborting\n");
				goto err_disable_device;
			}
		}
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		goto err_disable_device;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
	if (!netdev) {
		ret = -ENOMEM;
		goto err_release_pci;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
	if (!adapter->hw.reg) {
		ret = -EIO;
		dev_err(&pdev->dev, "Can't ioremap\n");
		goto err_free_netdev;
	}

#ifdef CONFIG_PCH_PTP
	adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
						 PCI_DEVFN(12, 4));
	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
		pr_err("Bad ptp filter\n");
		ret = -EINVAL;
		goto err_iounmap;	/* don't leak the mapping and netdev */
	}
#endif

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
	netdev->hw_features = NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features = netdev->hw_features;
	pch_gbe_set_ethtool_ops(netdev);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

	/* setup the private structure */
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_iounmap;

	/* Initialize PHY */
	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}
	pch_gbe_hal_get_bus_info(&adapter->hw);

	/* Read the MAC address and store it in the private data */
	ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/*
		 * If the MAC is invalid (or just missing), display a warning
		 * but do not abort setting up the device. pch_gbe_up will
		 * prevent the interface from being brought up until a valid
		 * MAC is set.
		 */
		dev_err(&pdev->dev, "Invalid MAC address, "
			"interface disabled.\n");
	}
	setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

	/* initialize the wol settings based on the eeprom settings */
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;
	/* tell the stack to leave us alone until pch_gbe_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");

	device_set_wakeup_enable(&pdev->dev, 1);
	return 0;

err_free_adapter:
	pch_gbe_hal_phy_hw_reset(&adapter->hw);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_iounmap:
	iounmap(adapter->hw.reg);
err_free_netdev:
	free_netdev(netdev);
err_release_pci:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};

#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
static int __init pch_gbe_init_module(void)
{
	int ret;

	ret = pci_register_driver(&pch_gbe_driver);
	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}

module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* pch_gbe_main.c */