cxgb3_main.c

  1. /*
  2. * This file is part of the Chelsio T3 Ethernet driver for Linux.
  3. *
  4. * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
  5. *
  6. * This program is distributed in the hope that it will be useful, but WITHOUT
  7. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  8. * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
  9. * release for licensing terms and conditions.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/moduleparam.h>
  13. #include <linux/init.h>
  14. #include <linux/pci.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/netdevice.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/if_vlan.h>
  19. #include <linux/mii.h>
  20. #include <linux/sockios.h>
  21. #include <linux/workqueue.h>
  22. #include <linux/proc_fs.h>
  23. #include <linux/rtnetlink.h>
  24. #include <asm/uaccess.h>
  25. #include "common.h"
  26. #include "cxgb3_ioctl.h"
  27. #include "regs.h"
  28. #include "cxgb3_offload.h"
  29. #include "version.h"
  30. #include "cxgb3_ctl_defs.h"
  31. #include "t3_cpl.h"
  32. #include "firmware_exports.h"
  33. enum {
  34. MAX_TXQ_ENTRIES = 16384,
  35. MAX_CTRL_TXQ_ENTRIES = 1024,
  36. MAX_RSPQ_ENTRIES = 16384,
  37. MAX_RX_BUFFERS = 16384,
  38. MAX_RX_JUMBO_BUFFERS = 16384,
  39. MIN_TXQ_ENTRIES = 4,
  40. MIN_CTRL_TXQ_ENTRIES = 4,
  41. MIN_RSPQ_ENTRIES = 32,
  42. MIN_FL_ENTRIES = 32
  43. };
  44. #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  45. #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  46. NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
  47. NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
  48. #define EEPROM_MAGIC 0x38E2F10C
  49. #define to_net_dev(class) container_of(class, struct net_device, class_dev)
  50. #define CH_DEVICE(devid, ssid, idx) \
  51. { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
  52. static const struct pci_device_id cxgb3_pci_tbl[] = {
  53. CH_DEVICE(0x20, 1, 0), /* PE9000 */
  54. CH_DEVICE(0x21, 1, 1), /* T302E */
  55. CH_DEVICE(0x22, 1, 2), /* T310E */
  56. CH_DEVICE(0x23, 1, 3), /* T320X */
  57. CH_DEVICE(0x24, 1, 1), /* T302X */
  58. CH_DEVICE(0x25, 1, 3), /* T320E */
  59. CH_DEVICE(0x26, 1, 2), /* T310X */
  60. CH_DEVICE(0x30, 1, 2), /* T3B10 */
  61. CH_DEVICE(0x31, 1, 3), /* T3B20 */
  62. CH_DEVICE(0x32, 1, 1), /* T3B02 */
  63. {0,}
  64. };
  65. MODULE_DESCRIPTION(DRV_DESC);
  66. MODULE_AUTHOR("Chelsio Communications");
  67. MODULE_LICENSE("GPL");
  68. MODULE_VERSION(DRV_VERSION);
  69. MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
  70. static int dflt_msg_enable = DFLT_MSG_ENABLE;
  71. module_param(dflt_msg_enable, int, 0644);
  72. MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
  73. /*
  74. * The driver uses the best interrupt scheme available on a platform in the
  75. * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
  76. * of these schemes the driver may consider as follows:
  77. *
  78. * msi = 2: choose from among all three options
  79. * msi = 1: only consider MSI and pin interrupts
  80. * msi = 0: force pin interrupts
  81. */
  82. static int msi = 2;
  83. module_param(msi, int, 0644);
  84. MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
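/*
 * Example (assuming the module is named cxgb3): force legacy pin interrupts
 * at load time with "modprobe cxgb3 msi=0", or restrict the driver to
 * MSI/INTx with "msi=1".
 */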
  85. /*
  86. * The driver enables offload as a default.
  87. * To disable it, use ofld_disable = 1.
  88. */
  89. static int ofld_disable = 0;
  90. module_param(ofld_disable, int, 0644);
  91. MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
  92. /*
  93. * We have work elements that we need to cancel when an interface is taken
  94. * down. Normally the work elements would be executed by keventd but that
  95. * can deadlock because of linkwatch. If our close method takes the rtnl
  96. * lock and linkwatch is ahead of our work elements in keventd, linkwatch
  97. * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
  98. * for our work to complete. Get our own work queue to solve this.
  99. */
  100. static struct workqueue_struct *cxgb3_wq;
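/*
 * Sketch, not shown in this excerpt: the queue would typically be created
 * once at module load, e.g. cxgb3_wq = create_singlethread_workqueue(DRV_NAME),
 * and destroyed again on module unload.
 */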
  101. /**
  102. * link_report - show link status and link speed/duplex
  103. * @dev: the net device whose link status is to be reported
  104. *
  105. * Shows the link status, speed, and duplex of a port.
  106. */
  107. static void link_report(struct net_device *dev)
  108. {
  109. if (!netif_carrier_ok(dev))
  110. printk(KERN_INFO "%s: link down\n", dev->name);
  111. else {
  112. const char *s = "10Mbps";
  113. const struct port_info *p = netdev_priv(dev);
  114. switch (p->link_config.speed) {
  115. case SPEED_10000:
  116. s = "10Gbps";
  117. break;
  118. case SPEED_1000:
  119. s = "1000Mbps";
  120. break;
  121. case SPEED_100:
  122. s = "100Mbps";
  123. break;
  124. }
  125. printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
  126. p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
  127. }
  128. }
  129. /**
  130. * t3_os_link_changed - handle link status changes
  131. * @adapter: the adapter associated with the link change
  132. * @port_id: the port index whose link status has changed
  133. * @link_stat: the new status of the link
  134. * @speed: the new speed setting
  135. * @duplex: the new duplex setting
  136. * @pause: the new flow-control setting
  137. *
  138. * This is the OS-dependent handler for link status changes. The OS
  139. * neutral handler takes care of most of the processing for these events,
  140. * then calls this handler for any OS-specific processing.
  141. */
  142. void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
  143. int speed, int duplex, int pause)
  144. {
  145. struct net_device *dev = adapter->port[port_id];
  146. /* Skip changes from disabled ports. */
  147. if (!netif_running(dev))
  148. return;
  149. if (link_stat != netif_carrier_ok(dev)) {
  150. if (link_stat)
  151. netif_carrier_on(dev);
  152. else
  153. netif_carrier_off(dev);
  154. link_report(dev);
  155. }
  156. }
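/*
 * Propagate a port's receive mode and multicast filter list from its
 * net_device to the MAC.
 */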
  157. static void cxgb_set_rxmode(struct net_device *dev)
  158. {
  159. struct t3_rx_mode rm;
  160. struct port_info *pi = netdev_priv(dev);
  161. init_rx_mode(&rm, dev, dev->mc_list);
  162. t3_mac_set_rx_mode(&pi->mac, &rm);
  163. }
  164. /**
  165. * link_start - enable a port
  166. * @dev: the device to enable
  167. *
  168. * Performs the MAC and PHY actions needed to enable a port.
  169. */
  170. static void link_start(struct net_device *dev)
  171. {
  172. struct t3_rx_mode rm;
  173. struct port_info *pi = netdev_priv(dev);
  174. struct cmac *mac = &pi->mac;
  175. init_rx_mode(&rm, dev, dev->mc_list);
  176. t3_mac_reset(mac);
  177. t3_mac_set_mtu(mac, dev->mtu);
  178. t3_mac_set_address(mac, 0, dev->dev_addr);
  179. t3_mac_set_rx_mode(mac, &rm);
  180. t3_link_start(&pi->phy, mac, &pi->link_config);
  181. t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
  182. }
  183. static inline void cxgb_disable_msi(struct adapter *adapter)
  184. {
  185. if (adapter->flags & USING_MSIX) {
  186. pci_disable_msix(adapter->pdev);
  187. adapter->flags &= ~USING_MSIX;
  188. } else if (adapter->flags & USING_MSI) {
  189. pci_disable_msi(adapter->pdev);
  190. adapter->flags &= ~USING_MSI;
  191. }
  192. }
  193. /*
  194. * Interrupt handler for asynchronous events used with MSI-X.
  195. */
  196. static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
  197. {
  198. t3_slow_intr_handler(cookie);
  199. return IRQ_HANDLED;
  200. }
  201. /*
  202. * Name the MSI-X interrupts.
  203. */
  204. static void name_msix_vecs(struct adapter *adap)
  205. {
  206. int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
  207. snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
  208. adap->msix_info[0].desc[n] = 0;
  209. for_each_port(adap, j) {
  210. struct net_device *d = adap->port[j];
  211. const struct port_info *pi = netdev_priv(d);
  212. for (i = 0; i < pi->nqsets; i++, msi_idx++) {
  213. snprintf(adap->msix_info[msi_idx].desc, n,
  214. "%s (queue %d)", d->name, i);
  215. adap->msix_info[msi_idx].desc[n] = 0;
  216. }
  217. }
  218. }
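/*
 * Request one MSI-X vector for each SGE queue set, unwinding any vectors
 * already acquired if a request fails.
 */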
  219. static int request_msix_data_irqs(struct adapter *adap)
  220. {
  221. int i, j, err, qidx = 0;
  222. for_each_port(adap, i) {
  223. int nqsets = adap2pinfo(adap, i)->nqsets;
  224. for (j = 0; j < nqsets; ++j) {
  225. err = request_irq(adap->msix_info[qidx + 1].vec,
  226. t3_intr_handler(adap,
  227. adap->sge.qs[qidx].
  228. rspq.polling), 0,
  229. adap->msix_info[qidx + 1].desc,
  230. &adap->sge.qs[qidx]);
  231. if (err) {
  232. while (--qidx >= 0)
  233. free_irq(adap->msix_info[qidx + 1].vec,
  234. &adap->sge.qs[qidx]);
  235. return err;
  236. }
  237. qidx++;
  238. }
  239. }
  240. return 0;
  241. }
  242. /**
  243. * setup_rss - configure RSS
  244. * @adap: the adapter
  245. *
  246. * Sets up RSS to distribute packets to multiple receive queues. We
  247. * configure the RSS CPU lookup table to distribute to the number of HW
  248. * receive queues, and the response queue lookup table to narrow that
  249. * down to the response queues actually configured for each port.
  250. * We always configure the RSS mapping for two ports since the mapping
  251. * table has plenty of entries.
  252. */
  253. static void setup_rss(struct adapter *adap)
  254. {
  255. int i;
  256. unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
  257. unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
  258. u8 cpus[SGE_QSETS + 1];
  259. u16 rspq_map[RSS_TABLE_SIZE];
  260. for (i = 0; i < SGE_QSETS; ++i)
  261. cpus[i] = i;
  262. cpus[SGE_QSETS] = 0xff; /* terminator */
  263. for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
  264. rspq_map[i] = i % nq0;
  265. rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
  266. }
  267. t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
  268. F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
  269. V_RRCPLCPUSIZE(6), cpus, rspq_map);
  270. }
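/*
 * Example of the mapping built above: with two queue sets on each of two
 * ports (nq0 = nq1 = 2), the first half of the lookup table cycles over
 * response queues 0 and 1 and the second half over queues 2 and 3.
 */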
  271. /*
  272. * If we have multiple receive queues per port serviced by NAPI we need one
  273. * netdevice per queue as NAPI operates on netdevices. We already have one
  274. * netdevice, namely the one associated with the interface, so we use dummy
  275. * ones for any additional queues. Note that these netdevices exist purely
  276. * so that NAPI has something to work with, they do not represent network
  277. * ports and are not registered.
  278. */
  279. static int init_dummy_netdevs(struct adapter *adap)
  280. {
  281. int i, j, dummy_idx = 0;
  282. struct net_device *nd;
  283. for_each_port(adap, i) {
  284. struct net_device *dev = adap->port[i];
  285. const struct port_info *pi = netdev_priv(dev);
  286. for (j = 0; j < pi->nqsets - 1; j++) {
  287. if (!adap->dummy_netdev[dummy_idx]) {
  288. nd = alloc_netdev(0, "", ether_setup);
  289. if (!nd)
  290. goto free_all;
  291. nd->priv = adap;
  292. nd->weight = 64;
  293. set_bit(__LINK_STATE_START, &nd->state);
  294. adap->dummy_netdev[dummy_idx] = nd;
  295. }
  296. strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
  297. dummy_idx++;
  298. }
  299. }
  300. return 0;
  301. free_all:
  302. while (--dummy_idx >= 0) {
  303. free_netdev(adap->dummy_netdev[dummy_idx]);
  304. adap->dummy_netdev[dummy_idx] = NULL;
  305. }
  306. return -ENOMEM;
  307. }
  308. /*
  309. * Wait until all NAPI handlers are descheduled. This includes the handlers of
  310. * both netdevices representing interfaces and the dummy ones for the extra
  311. * queues.
  312. */
  313. static void quiesce_rx(struct adapter *adap)
  314. {
  315. int i;
  316. struct net_device *dev;
  317. for_each_port(adap, i) {
  318. dev = adap->port[i];
  319. while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
  320. msleep(1);
  321. }
  322. for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
  323. dev = adap->dummy_netdev[i];
  324. if (dev)
  325. while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
  326. msleep(1);
  327. }
  328. }
  329. /**
  330. * setup_sge_qsets - configure SGE Tx/Rx/response queues
  331. * @adap: the adapter
  332. *
  333. * Determines how many sets of SGE queues to use and initializes them.
  334. * We support multiple queue sets per port if we have MSI-X, otherwise
  335. * just one queue set per port.
  336. */
  337. static int setup_sge_qsets(struct adapter *adap)
  338. {
  339. int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
  340. unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
  341. if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
  342. irq_idx = -1;
  343. for_each_port(adap, i) {
  344. struct net_device *dev = adap->port[i];
  345. const struct port_info *pi = netdev_priv(dev);
  346. for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
  347. err = t3_sge_alloc_qset(adap, qset_idx, 1,
  348. (adap->flags & USING_MSIX) ? qset_idx + 1 :
  349. irq_idx,
  350. &adap->params.sge.qset[qset_idx], ntxq,
  351. j == 0 ? dev :
  352. adap->dummy_netdev[dummy_dev_idx++]);
  353. if (err) {
  354. t3_free_sge_resources(adap);
  355. return err;
  356. }
  357. }
  358. }
  359. return 0;
  360. }
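/*
 * Helpers for the adapter sysfs attributes below: run the show/store
 * callback under the RTNL lock so it cannot race with ioctls that
 * reconfigure or shut down the device.
 */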
  361. static ssize_t attr_show(struct class_device *cd, char *buf,
  362. ssize_t(*format) (struct adapter *, char *))
  363. {
  364. ssize_t len;
  365. struct adapter *adap = to_net_dev(cd)->priv;
  366. /* Synchronize with ioctls that may shut down the device */
  367. rtnl_lock();
  368. len = (*format) (adap, buf);
  369. rtnl_unlock();
  370. return len;
  371. }
  372. static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
  373. ssize_t(*set) (struct adapter *, unsigned int),
  374. unsigned int min_val, unsigned int max_val)
  375. {
  376. char *endp;
  377. ssize_t ret;
  378. unsigned int val;
  379. struct adapter *adap = to_net_dev(cd)->priv;
  380. if (!capable(CAP_NET_ADMIN))
  381. return -EPERM;
  382. val = simple_strtoul(buf, &endp, 0);
  383. if (endp == buf || val < min_val || val > max_val)
  384. return -EINVAL;
  385. rtnl_lock();
  386. ret = (*set) (adap, val);
  387. if (!ret)
  388. ret = len;
  389. rtnl_unlock();
  390. return ret;
  391. }
  392. #define CXGB3_SHOW(name, val_expr) \
  393. static ssize_t format_##name(struct adapter *adap, char *buf) \
  394. { \
  395. return sprintf(buf, "%u\n", val_expr); \
  396. } \
  397. static ssize_t show_##name(struct class_device *cd, char *buf) \
  398. { \
  399. return attr_show(cd, buf, format_##name); \
  400. }
  401. static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
  402. {
  403. if (adap->flags & FULL_INIT_DONE)
  404. return -EBUSY;
  405. if (val && adap->params.rev == 0)
  406. return -EINVAL;
  407. if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
  408. return -EINVAL;
  409. adap->params.mc5.nfilters = val;
  410. return 0;
  411. }
  412. static ssize_t store_nfilters(struct class_device *cd, const char *buf,
  413. size_t len)
  414. {
  415. return attr_store(cd, buf, len, set_nfilters, 0, ~0);
  416. }
  417. static ssize_t set_nservers(struct adapter *adap, unsigned int val)
  418. {
  419. if (adap->flags & FULL_INIT_DONE)
  420. return -EBUSY;
  421. if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
  422. return -EINVAL;
  423. adap->params.mc5.nservers = val;
  424. return 0;
  425. }
  426. static ssize_t store_nservers(struct class_device *cd, const char *buf,
  427. size_t len)
  428. {
  429. return attr_store(cd, buf, len, set_nservers, 0, ~0);
  430. }
  431. #define CXGB3_ATTR_R(name, val_expr) \
  432. CXGB3_SHOW(name, val_expr) \
  433. static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
  434. #define CXGB3_ATTR_RW(name, val_expr, store_method) \
  435. CXGB3_SHOW(name, val_expr) \
  436. static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
  437. CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
  438. CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
  439. CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
  440. static struct attribute *cxgb3_attrs[] = {
  441. &class_device_attr_cam_size.attr,
  442. &class_device_attr_nfilters.attr,
  443. &class_device_attr_nservers.attr,
  444. NULL
  445. };
  446. static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
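/*
 * Show the rate of one of the hardware Tx traffic schedulers.  The
 * scheduler's bytes-per-tick (bpt) and clocks-per-tick (cpt) values are read
 * back from the TP TM registers; the rate printed is
 * ((cclk * 1000) / cpt) * bpt / 125 Kbps, since 125 bytes/s equals 1 Kbps.
 * A cpt of zero means the scheduler is disabled.
 */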
  447. static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
  448. {
  449. ssize_t len;
  450. unsigned int v, addr, bpt, cpt;
  451. struct adapter *adap = to_net_dev(cd)->priv;
  452. addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
  453. rtnl_lock();
  454. t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
  455. v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
  456. if (sched & 1)
  457. v >>= 16;
  458. bpt = (v >> 8) & 0xff;
  459. cpt = v & 0xff;
  460. if (!cpt)
  461. len = sprintf(buf, "disabled\n");
  462. else {
  463. v = (adap->params.vpd.cclk * 1000) / cpt;
  464. len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
  465. }
  466. rtnl_unlock();
  467. return len;
  468. }
  469. static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
  470. size_t len, int sched)
  471. {
  472. char *endp;
  473. ssize_t ret;
  474. unsigned int val;
  475. struct adapter *adap = to_net_dev(cd)->priv;
  476. if (!capable(CAP_NET_ADMIN))
  477. return -EPERM;
  478. val = simple_strtoul(buf, &endp, 0);
  479. if (endp == buf || val > 10000000)
  480. return -EINVAL;
  481. rtnl_lock();
  482. ret = t3_config_sched(adap, val, sched);
  483. if (!ret)
  484. ret = len;
  485. rtnl_unlock();
  486. return ret;
  487. }
  488. #define TM_ATTR(name, sched) \
  489. static ssize_t show_##name(struct class_device *cd, char *buf) \
  490. { \
  491. return tm_attr_show(cd, buf, sched); \
  492. } \
  493. static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
  494. { \
  495. return tm_attr_store(cd, buf, len, sched); \
  496. } \
  497. static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
  498. TM_ATTR(sched0, 0);
  499. TM_ATTR(sched1, 1);
  500. TM_ATTR(sched2, 2);
  501. TM_ATTR(sched3, 3);
  502. TM_ATTR(sched4, 4);
  503. TM_ATTR(sched5, 5);
  504. TM_ATTR(sched6, 6);
  505. TM_ATTR(sched7, 7);
  506. static struct attribute *offload_attrs[] = {
  507. &class_device_attr_sched0.attr,
  508. &class_device_attr_sched1.attr,
  509. &class_device_attr_sched2.attr,
  510. &class_device_attr_sched3.attr,
  511. &class_device_attr_sched4.attr,
  512. &class_device_attr_sched5.attr,
  513. &class_device_attr_sched6.attr,
  514. &class_device_attr_sched7.attr,
  515. NULL
  516. };
  517. static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
  518. /*
  519. * Sends an sk_buff to an offload queue driver
  520. * after dealing with any active network taps.
  521. */
  522. static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
  523. {
  524. int ret;
  525. local_bh_disable();
  526. ret = t3_offload_tx(tdev, skb);
  527. local_bh_enable();
  528. return ret;
  529. }
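/*
 * Write a port's source MAC address into the hardware SMT by sending a
 * CPL_SMT_WRITE_REQ on the offload control path.
 */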
  530. static int write_smt_entry(struct adapter *adapter, int idx)
  531. {
  532. struct cpl_smt_write_req *req;
  533. struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
  534. if (!skb)
  535. return -ENOMEM;
  536. req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
  537. req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
  538. OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
  539. req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
  540. req->iff = idx;
  541. memset(req->src_mac1, 0, sizeof(req->src_mac1));
  542. memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
  543. skb->priority = 1;
  544. offload_tx(&adapter->tdev, skb);
  545. return 0;
  546. }
  547. static int init_smt(struct adapter *adapter)
  548. {
  549. int i;
  550. for_each_port(adapter, i)
  551. write_smt_entry(adapter, i);
  552. return 0;
  553. }
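/*
 * Program the per-port MTU register: port 0's MTU goes in the low 16 bits,
 * port 1's (if present) in the high 16 bits.
 */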
  554. static void init_port_mtus(struct adapter *adapter)
  555. {
  556. unsigned int mtus = adapter->port[0]->mtu;
  557. if (adapter->port[1])
  558. mtus |= adapter->port[1]->mtu << 16;
  559. t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
  560. }
  561. /**
  562. * cxgb_up - enable the adapter
  563. * @adap: adapter being enabled
  564. *
  565. * Called when the first port is enabled, this function performs the
  566. * actions necessary to make an adapter operational, such as completing
  567. * the initialization of HW modules, and enabling interrupts.
  568. *
  569. * Must be called with the rtnl lock held.
  570. */
  571. static int cxgb_up(struct adapter *adap)
  572. {
  573. int err = 0;
  574. if (!(adap->flags & FULL_INIT_DONE)) {
  575. err = t3_check_fw_version(adap);
  576. if (err) {
  577. dev_err(&adap->pdev->dev,
  578. "adapter FW is not compatible with driver\n");
  579. goto out;
  580. }
  581. err = init_dummy_netdevs(adap);
  582. if (err)
  583. goto out;
  584. err = t3_init_hw(adap, 0);
  585. if (err)
  586. goto out;
  587. err = setup_sge_qsets(adap);
  588. if (err)
  589. goto out;
  590. setup_rss(adap);
  591. adap->flags |= FULL_INIT_DONE;
  592. }
  593. t3_intr_clear(adap);
  594. if (adap->flags & USING_MSIX) {
  595. name_msix_vecs(adap);
  596. err = request_irq(adap->msix_info[0].vec,
  597. t3_async_intr_handler, 0,
  598. adap->msix_info[0].desc, adap);
  599. if (err)
  600. goto irq_err;
  601. if (request_msix_data_irqs(adap)) {
  602. free_irq(adap->msix_info[0].vec, adap);
  603. goto irq_err;
  604. }
  605. } else if ((err = request_irq(adap->pdev->irq,
  606. t3_intr_handler(adap,
  607. adap->sge.qs[0].rspq.
  608. polling),
  609. (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
  610. adap->name, adap)))
  611. goto irq_err;
  612. t3_sge_start(adap);
  613. t3_intr_enable(adap);
  614. out:
  615. return err;
  616. irq_err:
  617. CH_ERR(adap, "request_irq failed, err %d\n", err);
  618. goto out;
  619. }
  620. /*
  621. * Release resources when all the ports and offloading have been stopped.
  622. */
  623. static void cxgb_down(struct adapter *adapter)
  624. {
  625. t3_sge_stop(adapter);
  626. spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
  627. t3_intr_disable(adapter);
  628. spin_unlock_irq(&adapter->work_lock);
  629. if (adapter->flags & USING_MSIX) {
  630. int i, n = 0;
  631. free_irq(adapter->msix_info[0].vec, adapter);
  632. for_each_port(adapter, i)
  633. n += adap2pinfo(adapter, i)->nqsets;
  634. for (i = 0; i < n; ++i)
  635. free_irq(adapter->msix_info[i + 1].vec,
  636. &adapter->sge.qs[i]);
  637. } else
  638. free_irq(adapter->pdev->irq, adapter);
  639. flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
  640. quiesce_rx(adapter);
  641. }
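/*
 * (Re)arm the periodic adapter check task, using the link polling period if
 * link polling is enabled and the statistics update period otherwise.
 */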
  642. static void schedule_chk_task(struct adapter *adap)
  643. {
  644. unsigned int timeo;
  645. timeo = adap->params.linkpoll_period ?
  646. (HZ * adap->params.linkpoll_period) / 10 :
  647. adap->params.stats_update_period * HZ;
  648. if (timeo)
  649. queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
  650. }
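/*
 * Bring up the adapter's offload capabilities: bring the adapter itself up
 * first if no ports are open, mark the offload device open, enable offload
 * mode, activate the offload layer, program the MTU and SMT tables, and
 * notify registered offload clients.
 */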
  651. static int offload_open(struct net_device *dev)
  652. {
  653. struct adapter *adapter = dev->priv;
  654. struct t3cdev *tdev = T3CDEV(dev);
  655. int adap_up = adapter->open_device_map & PORT_MASK;
  656. int err = 0;
  657. if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  658. return 0;
  659. if (!adap_up && (err = cxgb_up(adapter)) < 0)
  660. return err;
  661. t3_tp_set_offload_mode(adapter, 1);
  662. tdev->lldev = adapter->port[0];
  663. err = cxgb3_offload_activate(adapter);
  664. if (err)
  665. goto out;
  666. init_port_mtus(adapter);
  667. t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
  668. adapter->params.b_wnd,
  669. adapter->params.rev == 0 ?
  670. adapter->port[0]->mtu : 0xffff);
  671. init_smt(adapter);
  672. /* Never mind if the next step fails */
  673. sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
  674. /* Call back all registered clients */
  675. cxgb3_add_clients(tdev);
  676. out:
  677. /* restore them in case the offload module has changed them */
  678. if (err) {
  679. t3_tp_set_offload_mode(adapter, 0);
  680. clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
  681. cxgb3_set_dummy_ops(tdev);
  682. }
  683. return err;
  684. }
  685. static int offload_close(struct t3cdev *tdev)
  686. {
  687. struct adapter *adapter = tdev2adap(tdev);
  688. if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  689. return 0;
  690. /* Call back all registered clients */
  691. cxgb3_remove_clients(tdev);
  692. sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
  693. tdev->lldev = NULL;
  694. cxgb3_set_dummy_ops(tdev);
  695. t3_tp_set_offload_mode(adapter, 0);
  696. clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
  697. if (!adapter->open_device_map)
  698. cxgb_down(adapter);
  699. cxgb3_offload_deactivate(adapter);
  700. return 0;
  701. }
  702. static int cxgb_open(struct net_device *dev)
  703. {
  704. int err;
  705. struct adapter *adapter = dev->priv;
  706. struct port_info *pi = netdev_priv(dev);
  707. int other_ports = adapter->open_device_map & PORT_MASK;
  708. if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
  709. return err;
  710. set_bit(pi->port_id, &adapter->open_device_map);
  711. if (!ofld_disable) {
  712. err = offload_open(dev);
  713. if (err)
  714. printk(KERN_WARNING
  715. "Could not initialize offload capabilities\n");
  716. }
  717. link_start(dev);
  718. t3_port_intr_enable(adapter, pi->port_id);
  719. netif_start_queue(dev);
  720. if (!other_ports)
  721. schedule_chk_task(adapter);
  722. return 0;
  723. }
  724. static int cxgb_close(struct net_device *dev)
  725. {
  726. struct adapter *adapter = dev->priv;
  727. struct port_info *p = netdev_priv(dev);
  728. t3_port_intr_disable(adapter, p->port_id);
  729. netif_stop_queue(dev);
  730. p->phy.ops->power_down(&p->phy, 1);
  731. netif_carrier_off(dev);
  732. t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
  733. spin_lock(&adapter->work_lock); /* sync with update task */
  734. clear_bit(p->port_id, &adapter->open_device_map);
  735. spin_unlock(&adapter->work_lock);
  736. if (!(adapter->open_device_map & PORT_MASK))
  737. cancel_rearming_delayed_workqueue(cxgb3_wq,
  738. &adapter->adap_check_task);
  739. if (!adapter->open_device_map)
  740. cxgb_down(adapter);
  741. return 0;
  742. }
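/*
 * Return netdev statistics derived from the port's accumulated MAC
 * statistics, refreshed here under the adapter's stats lock.
 */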
  743. static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
  744. {
  745. struct adapter *adapter = dev->priv;
  746. struct port_info *p = netdev_priv(dev);
  747. struct net_device_stats *ns = &p->netstats;
  748. const struct mac_stats *pstats;
  749. spin_lock(&adapter->stats_lock);
  750. pstats = t3_mac_update_stats(&p->mac);
  751. spin_unlock(&adapter->stats_lock);
  752. ns->tx_bytes = pstats->tx_octets;
  753. ns->tx_packets = pstats->tx_frames;
  754. ns->rx_bytes = pstats->rx_octets;
  755. ns->rx_packets = pstats->rx_frames;
  756. ns->multicast = pstats->rx_mcast_frames;
  757. ns->tx_errors = pstats->tx_underrun;
  758. ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
  759. pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
  760. pstats->rx_fifo_ovfl;
  761. /* detailed rx_errors */
  762. ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
  763. ns->rx_over_errors = 0;
  764. ns->rx_crc_errors = pstats->rx_fcs_errs;
  765. ns->rx_frame_errors = pstats->rx_symbol_errs;
  766. ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
  767. ns->rx_missed_errors = pstats->rx_cong_drops;
  768. /* detailed tx_errors */
  769. ns->tx_aborted_errors = 0;
  770. ns->tx_carrier_errors = 0;
  771. ns->tx_fifo_errors = pstats->tx_underrun;
  772. ns->tx_heartbeat_errors = 0;
  773. ns->tx_window_errors = 0;
  774. return ns;
  775. }
  776. static u32 get_msglevel(struct net_device *dev)
  777. {
  778. struct adapter *adapter = dev->priv;
  779. return adapter->msg_enable;
  780. }
  781. static void set_msglevel(struct net_device *dev, u32 val)
  782. {
  783. struct adapter *adapter = dev->priv;
  784. adapter->msg_enable = val;
  785. }
  786. static char stats_strings[][ETH_GSTRING_LEN] = {
  787. "TxOctetsOK ",
  788. "TxFramesOK ",
  789. "TxMulticastFramesOK",
  790. "TxBroadcastFramesOK",
  791. "TxPauseFrames ",
  792. "TxUnderrun ",
  793. "TxExtUnderrun ",
  794. "TxFrames64 ",
  795. "TxFrames65To127 ",
  796. "TxFrames128To255 ",
  797. "TxFrames256To511 ",
  798. "TxFrames512To1023 ",
  799. "TxFrames1024To1518 ",
  800. "TxFrames1519ToMax ",
  801. "RxOctetsOK ",
  802. "RxFramesOK ",
  803. "RxMulticastFramesOK",
  804. "RxBroadcastFramesOK",
  805. "RxPauseFrames ",
  806. "RxFCSErrors ",
  807. "RxSymbolErrors ",
  808. "RxShortErrors ",
  809. "RxJabberErrors ",
  810. "RxLengthErrors ",
  811. "RxFIFOoverflow ",
  812. "RxFrames64 ",
  813. "RxFrames65To127 ",
  814. "RxFrames128To255 ",
  815. "RxFrames256To511 ",
  816. "RxFrames512To1023 ",
  817. "RxFrames1024To1518 ",
  818. "RxFrames1519ToMax ",
  819. "PhyFIFOErrors ",
  820. "TSO ",
  821. "VLANextractions ",
  822. "VLANinsertions ",
  823. "TxCsumOffload ",
  824. "RxCsumGood ",
  825. "RxDrops "
  826. };
  827. static int get_stats_count(struct net_device *dev)
  828. {
  829. return ARRAY_SIZE(stats_strings);
  830. }
  831. #define T3_REGMAP_SIZE (3 * 1024)
  832. static int get_regs_len(struct net_device *dev)
  833. {
  834. return T3_REGMAP_SIZE;
  835. }
  836. static int get_eeprom_len(struct net_device *dev)
  837. {
  838. return EEPROMSIZE;
  839. }
  840. static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  841. {
  842. u32 fw_vers = 0;
  843. struct adapter *adapter = dev->priv;
  844. t3_get_fw_version(adapter, &fw_vers);
  845. strcpy(info->driver, DRV_NAME);
  846. strcpy(info->version, DRV_VERSION);
  847. strcpy(info->bus_info, pci_name(adapter->pdev));
  848. if (!fw_vers)
  849. strcpy(info->fw_version, "N/A");
  850. else
  851. snprintf(info->fw_version, sizeof(info->fw_version),
  852. "%s %u.%u", (fw_vers >> 24) ? "T" : "N",
  853. (fw_vers >> 12) & 0xfff, fw_vers & 0xfff);
  854. }
  855. static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
  856. {
  857. if (stringset == ETH_SS_STATS)
  858. memcpy(data, stats_strings, sizeof(stats_strings));
  859. }
  860. static unsigned long collect_sge_port_stats(struct adapter *adapter,
  861. struct port_info *p, int idx)
  862. {
  863. int i;
  864. unsigned long tot = 0;
  865. for (i = 0; i < p->nqsets; ++i)
  866. tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
  867. return tot;
  868. }
  869. static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
  870. u64 *data)
  871. {
  872. struct adapter *adapter = dev->priv;
  873. struct port_info *pi = netdev_priv(dev);
  874. const struct mac_stats *s;
  875. spin_lock(&adapter->stats_lock);
  876. s = t3_mac_update_stats(&pi->mac);
  877. spin_unlock(&adapter->stats_lock);
  878. *data++ = s->tx_octets;
  879. *data++ = s->tx_frames;
  880. *data++ = s->tx_mcast_frames;
  881. *data++ = s->tx_bcast_frames;
  882. *data++ = s->tx_pause;
  883. *data++ = s->tx_underrun;
  884. *data++ = s->tx_fifo_urun;
  885. *data++ = s->tx_frames_64;
  886. *data++ = s->tx_frames_65_127;
  887. *data++ = s->tx_frames_128_255;
  888. *data++ = s->tx_frames_256_511;
  889. *data++ = s->tx_frames_512_1023;
  890. *data++ = s->tx_frames_1024_1518;
  891. *data++ = s->tx_frames_1519_max;
  892. *data++ = s->rx_octets;
  893. *data++ = s->rx_frames;
  894. *data++ = s->rx_mcast_frames;
  895. *data++ = s->rx_bcast_frames;
  896. *data++ = s->rx_pause;
  897. *data++ = s->rx_fcs_errs;
  898. *data++ = s->rx_symbol_errs;
  899. *data++ = s->rx_short;
  900. *data++ = s->rx_jabber;
  901. *data++ = s->rx_too_long;
  902. *data++ = s->rx_fifo_ovfl;
  903. *data++ = s->rx_frames_64;
  904. *data++ = s->rx_frames_65_127;
  905. *data++ = s->rx_frames_128_255;
  906. *data++ = s->rx_frames_256_511;
  907. *data++ = s->rx_frames_512_1023;
  908. *data++ = s->rx_frames_1024_1518;
  909. *data++ = s->rx_frames_1519_max;
  910. *data++ = pi->phy.fifo_errors;
  911. *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
  912. *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
  913. *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
  914. *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
  915. *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
  916. *data++ = s->rx_cong_drops;
  917. }
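/*
 * Copy the contiguous register range [start, end] into the register dump
 * buffer at the offset corresponding to start.
 */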
  918. static inline void reg_block_dump(struct adapter *ap, void *buf,
  919. unsigned int start, unsigned int end)
  920. {
  921. u32 *p = buf + start;
  922. for (; start <= end; start += sizeof(u32))
  923. *p++ = t3_read_reg(ap, start);
  924. }
  925. static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
  926. void *buf)
  927. {
  928. struct adapter *ap = dev->priv;
  929. /*
  930. * Version scheme:
  931. * bits 0..9: chip version
  932. * bits 10..15: chip revision
  933. * bit 31: set for PCIe cards
  934. */
  935. regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
  936. /*
  937. * We skip the MAC statistics registers because they are clear-on-read.
  938. * Also reading multi-register stats would need to synchronize with the
  939. * periodic mac stats accumulation. Hard to justify the complexity.
  940. */
  941. memset(buf, 0, T3_REGMAP_SIZE);
  942. reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
  943. reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
  944. reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
  945. reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
  946. reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
  947. reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
  948. XGM_REG(A_XGM_SERDES_STAT3, 1));
  949. reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
  950. XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
  951. }
  952. static int restart_autoneg(struct net_device *dev)
  953. {
  954. struct port_info *p = netdev_priv(dev);
  955. if (!netif_running(dev))
  956. return -EAGAIN;
  957. if (p->link_config.autoneg != AUTONEG_ENABLE)
  958. return -EINVAL;
  959. p->phy.ops->autoneg_restart(&p->phy);
  960. return 0;
  961. }
  962. static int cxgb3_phys_id(struct net_device *dev, u32 data)
  963. {
  964. int i;
  965. struct adapter *adapter = dev->priv;
  966. if (data == 0)
  967. data = 2;
  968. for (i = 0; i < data * 2; i++) {
  969. t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
  970. (i & 1) ? F_GPIO0_OUT_VAL : 0);
  971. if (msleep_interruptible(500))
  972. break;
  973. }
  974. t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
  975. F_GPIO0_OUT_VAL);
  976. return 0;
  977. }
  978. static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  979. {
  980. struct port_info *p = netdev_priv(dev);
  981. cmd->supported = p->link_config.supported;
  982. cmd->advertising = p->link_config.advertising;
  983. if (netif_carrier_ok(dev)) {
  984. cmd->speed = p->link_config.speed;
  985. cmd->duplex = p->link_config.duplex;
  986. } else {
  987. cmd->speed = -1;
  988. cmd->duplex = -1;
  989. }
  990. cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
  991. cmd->phy_address = p->phy.addr;
  992. cmd->transceiver = XCVR_EXTERNAL;
  993. cmd->autoneg = p->link_config.autoneg;
  994. cmd->maxtxpkt = 0;
  995. cmd->maxrxpkt = 0;
  996. return 0;
  997. }
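/*
 * Translate a speed/duplex pair into the matching ethtool SUPPORTED_*
 * capability bit (10 Gbps is full duplex only).
 */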
  998. static int speed_duplex_to_caps(int speed, int duplex)
  999. {
  1000. int cap = 0;
  1001. switch (speed) {
  1002. case SPEED_10:
  1003. if (duplex == DUPLEX_FULL)
  1004. cap = SUPPORTED_10baseT_Full;
  1005. else
  1006. cap = SUPPORTED_10baseT_Half;
  1007. break;
  1008. case SPEED_100:
  1009. if (duplex == DUPLEX_FULL)
  1010. cap = SUPPORTED_100baseT_Full;
  1011. else
  1012. cap = SUPPORTED_100baseT_Half;
  1013. break;
  1014. case SPEED_1000:
  1015. if (duplex == DUPLEX_FULL)
  1016. cap = SUPPORTED_1000baseT_Full;
  1017. else
  1018. cap = SUPPORTED_1000baseT_Half;
  1019. break;
  1020. case SPEED_10000:
  1021. if (duplex == DUPLEX_FULL)
  1022. cap = SUPPORTED_10000baseT_Full;
  1023. }
  1024. return cap;
  1025. }
  1026. #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
  1027. ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
  1028. ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
  1029. ADVERTISED_10000baseT_Full)
  1030. static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1031. {
  1032. struct port_info *p = netdev_priv(dev);
  1033. struct link_config *lc = &p->link_config;
  1034. if (!(lc->supported & SUPPORTED_Autoneg))
  1035. return -EOPNOTSUPP; /* can't change speed/duplex */
  1036. if (cmd->autoneg == AUTONEG_DISABLE) {
  1037. int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
  1038. if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
  1039. return -EINVAL;
  1040. lc->requested_speed = cmd->speed;
  1041. lc->requested_duplex = cmd->duplex;
  1042. lc->advertising = 0;
  1043. } else {
  1044. cmd->advertising &= ADVERTISED_MASK;
  1045. cmd->advertising &= lc->supported;
  1046. if (!cmd->advertising)
  1047. return -EINVAL;
  1048. lc->requested_speed = SPEED_INVALID;
  1049. lc->requested_duplex = DUPLEX_INVALID;
  1050. lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
  1051. }
  1052. lc->autoneg = cmd->autoneg;
  1053. if (netif_running(dev))
  1054. t3_link_start(&p->phy, &p->mac, lc);
  1055. return 0;
  1056. }
  1057. static void get_pauseparam(struct net_device *dev,
  1058. struct ethtool_pauseparam *epause)
  1059. {
  1060. struct port_info *p = netdev_priv(dev);
  1061. epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
  1062. epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
  1063. epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
  1064. }
  1065. static int set_pauseparam(struct net_device *dev,
  1066. struct ethtool_pauseparam *epause)
  1067. {
  1068. struct port_info *p = netdev_priv(dev);
  1069. struct link_config *lc = &p->link_config;
  1070. if (epause->autoneg == AUTONEG_DISABLE)
  1071. lc->requested_fc = 0;
  1072. else if (lc->supported & SUPPORTED_Autoneg)
  1073. lc->requested_fc = PAUSE_AUTONEG;
  1074. else
  1075. return -EINVAL;
  1076. if (epause->rx_pause)
  1077. lc->requested_fc |= PAUSE_RX;
  1078. if (epause->tx_pause)
  1079. lc->requested_fc |= PAUSE_TX;
  1080. if (lc->autoneg == AUTONEG_ENABLE) {
  1081. if (netif_running(dev))
  1082. t3_link_start(&p->phy, &p->mac, lc);
  1083. } else {
  1084. lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
  1085. if (netif_running(dev))
  1086. t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
  1087. }
  1088. return 0;
  1089. }
  1090. static u32 get_rx_csum(struct net_device *dev)
  1091. {
  1092. struct port_info *p = netdev_priv(dev);
  1093. return p->rx_csum_offload;
  1094. }
  1095. static int set_rx_csum(struct net_device *dev, u32 data)
  1096. {
  1097. struct port_info *p = netdev_priv(dev);
  1098. p->rx_csum_offload = data;
  1099. return 0;
  1100. }
  1101. static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
  1102. {
  1103. struct adapter *adapter = dev->priv;
  1104. e->rx_max_pending = MAX_RX_BUFFERS;
  1105. e->rx_mini_max_pending = 0;
  1106. e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
  1107. e->tx_max_pending = MAX_TXQ_ENTRIES;
  1108. e->rx_pending = adapter->params.sge.qset[0].fl_size;
  1109. e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
  1110. e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
  1111. e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
  1112. }
  1113. static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
  1114. {
  1115. int i;
  1116. struct adapter *adapter = dev->priv;
  1117. if (e->rx_pending > MAX_RX_BUFFERS ||
  1118. e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
  1119. e->tx_pending > MAX_TXQ_ENTRIES ||
  1120. e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
  1121. e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
  1122. e->rx_pending < MIN_FL_ENTRIES ||
  1123. e->rx_jumbo_pending < MIN_FL_ENTRIES ||
  1124. e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
  1125. return -EINVAL;
  1126. if (adapter->flags & FULL_INIT_DONE)
  1127. return -EBUSY;
  1128. for (i = 0; i < SGE_QSETS; ++i) {
  1129. struct qset_params *q = &adapter->params.sge.qset[i];
  1130. q->rspq_size = e->rx_mini_pending;
  1131. q->fl_size = e->rx_pending;
  1132. q->jumbo_size = e->rx_jumbo_pending;
  1133. q->txq_size[0] = e->tx_pending;
  1134. q->txq_size[1] = e->tx_pending;
  1135. q->txq_size[2] = e->tx_pending;
  1136. }
  1137. return 0;
  1138. }
  1139. static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
  1140. {
  1141. struct adapter *adapter = dev->priv;
  1142. struct qset_params *qsp = &adapter->params.sge.qset[0];
  1143. struct sge_qset *qs = &adapter->sge.qs[0];
  1144. if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
  1145. return -EINVAL;
  1146. qsp->coalesce_usecs = c->rx_coalesce_usecs;
  1147. t3_update_qset_coalesce(qs, qsp);
  1148. return 0;
  1149. }
  1150. static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
  1151. {
  1152. struct adapter *adapter = dev->priv;
  1153. struct qset_params *q = adapter->params.sge.qset;
  1154. c->rx_coalesce_usecs = q->coalesce_usecs;
  1155. return 0;
  1156. }
  1157. static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
  1158. u8 * data)
  1159. {
  1160. int i, err = 0;
  1161. struct adapter *adapter = dev->priv;
  1162. u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
  1163. if (!buf)
  1164. return -ENOMEM;
  1165. e->magic = EEPROM_MAGIC;
  1166. for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
  1167. err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
  1168. if (!err)
  1169. memcpy(data, buf + e->offset, e->len);
  1170. kfree(buf);
  1171. return err;
  1172. }
  1173. static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
  1174. u8 * data)
  1175. {
  1176. u8 *buf;
  1177. int err = 0;
  1178. u32 aligned_offset, aligned_len, *p;
  1179. struct adapter *adapter = dev->priv;
  1180. if (eeprom->magic != EEPROM_MAGIC)
  1181. return -EINVAL;
  1182. aligned_offset = eeprom->offset & ~3;
  1183. aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
  1184. if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
  1185. buf = kmalloc(aligned_len, GFP_KERNEL);
  1186. if (!buf)
  1187. return -ENOMEM;
  1188. err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
  1189. if (!err && aligned_len > 4)
  1190. err = t3_seeprom_read(adapter,
  1191. aligned_offset + aligned_len - 4,
  1192. (u32 *) & buf[aligned_len - 4]);
  1193. if (err)
  1194. goto out;
  1195. memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
  1196. } else
  1197. buf = data;
  1198. err = t3_seeprom_wp(adapter, 0);
  1199. if (err)
  1200. goto out;
  1201. for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
  1202. err = t3_seeprom_write(adapter, aligned_offset, *p);
  1203. aligned_offset += 4;
  1204. }
  1205. if (!err)
  1206. err = t3_seeprom_wp(adapter, 1);
  1207. out:
  1208. if (buf != data)
  1209. kfree(buf);
  1210. return err;
  1211. }
  1212. static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  1213. {
  1214. wol->supported = 0;
  1215. wol->wolopts = 0;
  1216. memset(&wol->sopass, 0, sizeof(wol->sopass));
  1217. }
  1218. static const struct ethtool_ops cxgb_ethtool_ops = {
  1219. .get_settings = get_settings,
  1220. .set_settings = set_settings,
  1221. .get_drvinfo = get_drvinfo,
  1222. .get_msglevel = get_msglevel,
  1223. .set_msglevel = set_msglevel,
  1224. .get_ringparam = get_sge_param,
  1225. .set_ringparam = set_sge_param,
  1226. .get_coalesce = get_coalesce,
  1227. .set_coalesce = set_coalesce,
  1228. .get_eeprom_len = get_eeprom_len,
  1229. .get_eeprom = get_eeprom,
  1230. .set_eeprom = set_eeprom,
  1231. .get_pauseparam = get_pauseparam,
  1232. .set_pauseparam = set_pauseparam,
  1233. .get_rx_csum = get_rx_csum,
  1234. .set_rx_csum = set_rx_csum,
  1235. .get_tx_csum = ethtool_op_get_tx_csum,
  1236. .set_tx_csum = ethtool_op_set_tx_csum,
  1237. .get_sg = ethtool_op_get_sg,
  1238. .set_sg = ethtool_op_set_sg,
  1239. .get_link = ethtool_op_get_link,
  1240. .get_strings = get_strings,
  1241. .phys_id = cxgb3_phys_id,
  1242. .nway_reset = restart_autoneg,
  1243. .get_stats_count = get_stats_count,
  1244. .get_ethtool_stats = get_stats,
  1245. .get_regs_len = get_regs_len,
  1246. .get_regs = get_regs,
  1247. .get_wol = get_wol,
  1248. .get_tso = ethtool_op_get_tso,
  1249. .set_tso = ethtool_op_set_tso,
  1250. .get_perm_addr = ethtool_op_get_perm_addr
  1251. };
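/*
 * Range-check an ioctl parameter.  Negative values mean "leave unchanged"
 * and are always accepted.
 */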
  1252. static int in_range(int val, int lo, int hi)
  1253. {
  1254. return val < 0 || (val <= hi && val >= lo);
  1255. }
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	int ret;
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SETREG:{
		struct ch_reg edata;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
			return -EINVAL;
		writel(edata.val, adapter->regs + edata.addr);
		break;
	}
	case CHELSIO_GETREG:{
		struct ch_reg edata;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
			return -EINVAL;
		edata.val = readl(adapter->regs + edata.addr);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;

		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;

		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;

		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED:{
		struct sk_buff *skb;
		struct ch_pktsched_params p;
		struct mngt_pktsched_wr *req;

		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* uP must be up and running */
		if (copy_from_user(&p, useraddr, sizeof(p)))
			return -EFAULT;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = p.sched;
		req->idx = p.idx;
		req->min = p.min;
		req->max = p.max;
		req->binding = p.binding;
		printk(KERN_INFO
		       "pktsched: sched %u idx %u min %u max %u binding %u\n",
		       req->sched, req->idx, req->min, req->max,
		       req->binding);

		skb->priority = 1;
		offload_tx(&adapter->tdev, skb);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
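
/*
 * net_device ioctl handler: standard MII register access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG), with the Chelsio-private SIOCCHIOCTL forwarded
 * to cxgb_extension_ioctl() above.  For 10G PHYs the MMD is taken from the
 * upper bits of phy_id; 1G PHYs use clause-22 style access.
 */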
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret, mmd;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     mmd, data->reg_num, &val);
		} else
			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     0, data->reg_num & 0x1f, &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      mmd, data->reg_num,
					      data->val_in);
		} else
			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      0, data->reg_num & 0x1f,
					      data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
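
/*
 * Change a port's MTU.  The 81-byte minimum mirrors the MTU-table check in
 * cxgb_extension_ioctl() and leaves room for SACK options; on rev 0 parts
 * the MTU table is reloaded into the hardware when offload is running.
 */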
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
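
/*
 * Set a port's MAC address: program the MAC's address filter and, if
 * offload is running, rewrite the port's SMT entry so the offload path
 * picks up the new address.
 */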
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
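
/*
 * Enable or disable VLAN acceleration for a port.  Rev > 0 chips have a
 * per-port control; rev 0 has a single control shared by all ports, so
 * acceleration stays on while any port has a VLAN group.  Rx processing is
 * then synchronized so that in-flight packets finish with the old vlan_grp
 * before the change takes effect.
 */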
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing */
}

#ifdef CONFIG_NET_POLL_CONTROLLER
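/*
 * Netpoll entry point (used by netconsole): invoke the adapter's interrupt
 * handler directly for the queue set backing this device.
 */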
static void cxgb_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);

	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
						    adapter);
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
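
/*
 * Poll link state for running ports whose PHYs cannot signal link changes
 * via interrupt (no SUPPORTED_IRQ capability) and report any changes.
 */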
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
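
/*
 * Periodic housekeeping work: poll link state where needed, accumulate MAC
 * statistics at the configured interval, and reschedule itself while any
 * port is still open.
 */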
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
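
/*
 * Handle a fatal hardware error: stop the SGE and disable interrupts if the
 * adapter was fully initialized, then log the firmware status registers to
 * aid post-mortem debugging.
 */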
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
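
/*
 * Try to switch the adapter to MSI-X, requesting one vector per queue set
 * plus one extra.  Returns 0 on success; a positive return from
 * pci_enable_msix() means fewer vectors were available and MSI-X is not
 * used.
 */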
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
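
/*
 * Log a one-line summary for each registered port (board and PHY type, chip
 * revision, bus, interrupt mode) and, for the port whose name matches the
 * adapter, the sizes of the on-board CM/PMTX/PMRX memories.
 */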
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20);
	}
}
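
/*
 * PCI probe routine: map the device, allocate the adapter and one net_device
 * per port, set up netdev operations and features, register the ports, and
 * select the interrupt mode (MSI-X, MSI, or legacy INTx).
 */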
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;
		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter->port[0]);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
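
/*
 * PCI remove routine: undo everything done in init_one() - stop the SGE,
 * remove the sysfs group, unregister the net devices, shut down offload,
 * and release interrupt, memory, and PCI resources.
 */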
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
			if (adapter->dummy_netdev[i]) {
				free_netdev(adapter->dummy_netdev[i]);
				adapter->dummy_netdev[i] = NULL;
			}

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);