ipath_driver.c

/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
        static char iname[16];
        snprintf(iname, sizeof iname, "infinipath%u", unit);
        return iname;
}

EXPORT_SYMBOL_GPL(ipath_get_unit_name);

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ipath_core_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_sma_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@pathscale.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

const char *ipath_ibcstatus_str[] = {
        "Disabled",
        "LinkUp",
        "PollActive",
        "PollQuiet",
        "SleepDelay",
        "SleepQuiet",
        "LState6",              /* unused */
        "LState7",              /* unused */
        "CfgDebounce",
        "CfgRcvfCfg",
        "CfgWaitRmt",
        "CfgIdle",
        "RecovRetrain",
        "LState0xD",            /* unused */
        "RecovWaitRmt",
        "RecovIdle",
};

/*
 * These variables are initialized in the chip-specific files
 * but are defined here.
 */
u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
u64 ipath_gpio_sda, ipath_gpio_scl;
u64 infinipath_i_bitsextant;
ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
                                    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10

static const struct pci_device_id ipath_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
        { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
        .name = IPATH_DRV_NAME,
        .probe = ipath_init_one,
        .remove = __devexit_p(ipath_remove_one),
        .id_table = ipath_pci_tbl,
};

static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
                             u32 *bar0, u32 *bar1)
{
        int ret;

        ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
        if (ret)
                ipath_dev_err(dd, "failed to read bar0 before enable: "
                              "error %d\n", -ret);

        ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
        if (ret)
                ipath_dev_err(dd, "failed to read bar1 before enable: "
                              "error %d\n", -ret);

        ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
                               struct ipath_devdata *dd)
{
        unsigned long flags;

        pci_set_drvdata(pdev, NULL);

        if (dd->ipath_unit != -1) {
                spin_lock_irqsave(&ipath_devs_lock, flags);
                idr_remove(&unit_table, dd->ipath_unit);
                list_del(&dd->ipath_list);
                spin_unlock_irqrestore(&ipath_devs_lock, flags);
        }
        vfree(dd);
}

static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
        unsigned long flags;
        struct ipath_devdata *dd;
        int ret;

        if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }

        dd = vmalloc(sizeof(*dd));
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }
        memset(dd, 0, sizeof(*dd));
        dd->ipath_unit = -1;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
        if (ret < 0) {
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Could not allocate unit ID: error %d\n", -ret);
                ipath_free_devdata(pdev, dd);
                dd = ERR_PTR(ret);
                goto bail_unlock;
        }

        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);

        list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
        spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
        return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
        return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
        struct ipath_devdata *dd;
        unsigned long flags;

        spin_lock_irqsave(&ipath_devs_lock, flags);
        dd = __ipath_lookup(unit);
        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        return dd;
}

int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
{
        int nunits, npresent, nup;
        struct ipath_devdata *dd;
        unsigned long flags;
        u32 maxports;

        nunits = npresent = nup = maxports = 0;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
                nunits++;
                if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
                        npresent++;
                if (dd->ipath_lid &&
                    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
                                         | IPATH_LINKUNK)))
                        nup++;
                if (dd->ipath_cfgports > maxports)
                        maxports = dd->ipath_cfgports;
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        if (npresentp)
                *npresentp = npresent;
        if (nupp)
                *nupp = nup;
        if (maxportsp)
                *maxportsp = maxports;

        return nunits;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
        return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}
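/*
 * For illustration only: an arch/chip-specific file can override these
 * weak stubs with strong definitions, along the lines of the hypothetical
 * sketch below.  The linker prefers the strong symbol when one exists,
 * and the -EOPNOTSUPP fallback above is used only when it doesn't.
 *
 *      int ipath_enable_wc(struct ipath_devdata *dd)
 *      {
 *              // map the PIO buffer range write-combined here
 *              return 0;
 *      }
 */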
static int __devinit ipath_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        int ret, len, j;
        struct ipath_devdata *dd;
        unsigned long long addr;
        u32 bar0 = 0, bar1 = 0;
        u8 rev;

        dd = ipath_alloc_devdata(pdev);
        if (IS_ERR(dd)) {
                ret = PTR_ERR(dd);
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Could not allocate devdata: error %d\n", -ret);
                goto bail;
        }

        ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

        read_bars(dd, pdev, &bar0, &bar1);

        ret = pci_enable_device(pdev);
        if (ret) {
                /* This can happen iff:
                 *
                 * We did a chip reset, and then failed to reprogram the
                 * BAR, or the chip reset due to an internal error.  We then
                 * unloaded the driver and reloaded it.
                 *
                 * Both reset cases set the BAR back to initial state.  For
                 * the latter case, the AER sticky error bit at offset 0x718
                 * should be set, but the Linux kernel doesn't yet know
                 * about that, it appears.  If the original BAR was retained
                 * in the kernel data structures, this may be OK.
                 */
                ipath_dev_err(dd, "enable unit %d failed: error %d\n",
                              dd->ipath_unit, -ret);
                goto bail_devdata;
        }
        addr = pci_resource_start(pdev, 0);
        len = pci_resource_len(pdev, 0);
        ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x "
                   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
                   ent->device, ent->driver_data);

        read_bars(dd, pdev, &bar0, &bar1);

        if (!bar1 && !(bar0 & ~0xf)) {
                if (addr) {
                        dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
                                 "rewriting as %llx\n", addr);
                        ret = pci_write_config_dword(
                                pdev, PCI_BASE_ADDRESS_0, addr);
                        if (ret) {
                                ipath_dev_err(dd, "rewrite of BAR0 "
                                              "failed: err %d\n", -ret);
                                goto bail_disable;
                        }
                        ret = pci_write_config_dword(
                                pdev, PCI_BASE_ADDRESS_1, addr >> 32);
                        if (ret) {
                                ipath_dev_err(dd, "rewrite of BAR1 "
                                              "failed: err %d\n", -ret);
                                goto bail_disable;
                        }
                } else {
                        ipath_dev_err(dd, "BAR is 0 (probable RESET), "
                                      "not usable until reboot\n");
                        ret = -ENODEV;
                        goto bail_disable;
                }
        }
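        /*
         * Note, for illustration: the low four bits of a PCI BAR are
         * type/prefetch flags rather than address bits, so the
         * (bar0 & ~0xf) test above asks whether any address bits
         * survived the reset.  The saved 64-bit address is split back
         * into the two 32-bit config dwords:
         *
         *      BAR0 <- addr & 0xffffffff       (low half)
         *      BAR1 <- addr >> 32              (high half)
         */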
        ret = pci_request_regions(pdev, IPATH_DRV_NAME);
        if (ret) {
                dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
                         "err %d\n", dd->ipath_unit, -ret);
                goto bail_disable;
        }

        ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (ret) {
                /*
                 * if the 64 bit setup fails, try 32 bit.  Some systems
                 * do not setup 64 bit maps on systems with 2GB or less
                 * memory installed.
                 */
                ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (ret) {
                        dev_info(&pdev->dev,
                                 "Unable to set DMA mask for unit %u: %d\n",
                                 dd->ipath_unit, ret);
                        goto bail_regions;
                } else {
                        ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
                        ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (ret)
                                dev_info(&pdev->dev,
                                         "Unable to set DMA consistent mask "
                                         "for unit %u: %d\n",
                                         dd->ipath_unit, ret);
                }
        } else {
                ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (ret)
                        dev_info(&pdev->dev,
                                 "Unable to set DMA consistent mask "
                                 "for unit %u: %d\n",
                                 dd->ipath_unit, ret);
        }

        pci_set_master(pdev);

        /*
         * Save BARs to rewrite after device reset.  Save all 64 bits of
         * BAR, just in case.
         */
        dd->ipath_pcibar0 = addr;
        dd->ipath_pcibar1 = addr >> 32;
        dd->ipath_deviceid = ent->device;       /* save for later use */
        dd->ipath_vendorid = ent->vendor;

        /* setup the chip-specific functions, as early as possible. */
        switch (ent->device) {
        case PCI_DEVICE_ID_INFINIPATH_HT:
                ipath_init_ht400_funcs(dd);
                break;
        case PCI_DEVICE_ID_INFINIPATH_PE800:
                ipath_init_pe800_funcs(dd);
                break;
        default:
                ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
                              "failing\n", ent->device);
                return -ENODEV;
        }

        for (j = 0; j < 6; j++) {
                if (!pdev->resource[j].start)
                        continue;
                ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
                           j, (unsigned long long)pdev->resource[j].start,
                           (unsigned long long)pdev->resource[j].end,
                           (unsigned long long)pci_resource_len(pdev, j));
        }

        if (!addr) {
                ipath_dev_err(dd, "No valid address in BAR 0!\n");
                ret = -ENODEV;
                goto bail_regions;
        }

        dd->ipath_deviceid = ent->device;       /* save for later use */
        dd->ipath_vendorid = ent->vendor;

        ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
        if (ret) {
                ipath_dev_err(dd, "Failed to read PCI revision ID unit "
                              "%u: err %d\n", dd->ipath_unit, -ret);
                goto bail_regions;      /* shouldn't ever happen */
        }
        dd->ipath_pcirev = rev;

        dd->ipath_kregbase = ioremap_nocache(addr, len);

        if (!dd->ipath_kregbase) {
                ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
                          addr);
                ret = -ENOMEM;
                goto bail_iounmap;
        }

        dd->ipath_kregend = (u64 __iomem *)
                ((void __iomem *)dd->ipath_kregbase + len);
        dd->ipath_physaddr = addr;      /* used for io_remap, etc. */
        /* for user mmap */
        ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
                   addr, dd->ipath_kregbase);

        /*
         * clear ipath_flags here instead of in ipath_init_chip as it is set
         * by ipath_setup_htconfig.
         */
        dd->ipath_flags = 0;

        if (dd->ipath_f_bus(dd, pdev))
                ipath_dev_err(dd, "Failed to setup config space; "
                              "continuing anyway\n");

        /*
         * set up our interrupt handler; SA_SHIRQ probably not needed,
         * since MSI interrupts shouldn't be shared but won't hurt for now.
         * check 0 irq after we return from chip-specific bus setup, since
         * that can affect this due to setup
         */
        if (!pdev->irq)
                ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
                              "work\n");
        else {
                ret = request_irq(pdev->irq, ipath_intr, SA_SHIRQ,
                                  IPATH_DRV_NAME, dd);
                if (ret) {
                        ipath_dev_err(dd, "Couldn't setup irq handler, "
                                      "irq=%u: %d\n", pdev->irq, ret);
                        goto bail_iounmap;
                }
        }

        ret = ipath_init_chip(dd, 0);   /* do the chip-specific init */
        if (ret)
                goto bail_iounmap;

        ret = ipath_enable_wc(dd);
        if (ret) {
                ipath_dev_err(dd, "Write combining not enabled "
                              "(err %d): performance may be poor\n",
                              -ret);
                ret = 0;
        }

        ipath_device_create_group(&pdev->dev, dd);
        ipathfs_add_device(dd);
        ipath_user_add(dd);
        ipath_diag_add(dd);
        ipath_layer_add(dd);

        goto bail;

bail_iounmap:
        iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
        pci_release_regions(pdev);

bail_disable:
        pci_disable_device(pdev);

bail_devdata:
        ipath_free_devdata(pdev, dd);

bail:
        return ret;
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
        struct ipath_devdata *dd;

        ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev);
        if (!pdev)
                return;

        dd = pci_get_drvdata(pdev);
        ipath_layer_remove(dd);
        ipath_diag_remove(dd);
        ipath_user_remove(dd);
        ipathfs_remove_device(dd);
        ipath_device_remove_group(&pdev->dev, dd);
        ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
                   "unit %u\n", dd, (u32) dd->ipath_unit);
        if (dd->ipath_kregbase) {
                ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n",
                           dd->ipath_kregbase);
                iounmap((volatile void __iomem *) dd->ipath_kregbase);
                dd->ipath_kregbase = NULL;
        }
        pci_release_regions(pdev);
        ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
        pci_disable_device(pdev);

        ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);

/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * cancel a range of PIO buffers, used when they might be armed, but
 * not triggered.  Used at init to ensure buffer state, and also user
 * process close, in case it died while writing to a PIO buffer.
 * Also after errors.
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
                          unsigned cnt)
{
        unsigned i, last = first + cnt;
        u64 sendctrl, sendorig;

        ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
        sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
        for (i = first; i < last; i++) {
                sendctrl = sendorig |
                        (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                 sendctrl);
        }

        /*
         * Write it again with current value, in case ipath_sendctrl changed
         * while we were looping; no critical bits that would require
         * locking.
         *
         * Write a 0, and then the original value, reading scratch in
         * between.  This seems to avoid a chip timing race that causes
         * pioavail updates to memory to stop.
         */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         0);
        sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
}
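/*
 * For illustration: each pass of the disarm loop above issues one
 * sendctrl write of the form
 *
 *      dd->ipath_sendctrl | INFINIPATH_S_DISARM
 *              | (bufnum << INFINIPATH_S_DISARMPIOBUF_SHIFT)
 *
 * so disarming a range of N buffers costs N register writes; the
 * register interface has no bulk-disarm operation.
 */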
/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * wait up to msecs milliseconds for IB link state change to occur;
 * for now, take the easy polling route.  Currently used only by
 * ipath_layer_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT.  state can have multiple bits set, for any of several
 * transitions.
 */
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
        dd->ipath_sma_state_wanted = state;
        wait_event_interruptible_timeout(ipath_sma_state_wait,
                                         (dd->ipath_flags & state),
                                         msecs_to_jiffies(msecs));
        dd->ipath_sma_state_wanted = 0;

        if (!(dd->ipath_flags & state)) {
                u64 val;
                ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
                           /* test INIT ahead of DOWN, both can be set */
                           (state & IPATH_LINKINIT) ? "INIT" :
                           ((state & IPATH_LINKDOWN) ? "DOWN" :
                            ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
                           msecs);
                val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
                ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
                           (unsigned long long) ipath_read_kreg64(
                                   dd, dd->ipath_kregs->kr_ibcctrl),
                           (unsigned long long) val,
                           ipath_ibcstatus_str[val & 0xf]);
        }
        return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
        *buf = '\0';
        if (err & INFINIPATH_E_RHDRLEN)
                strlcat(buf, "rhdrlen ", blen);
        if (err & INFINIPATH_E_RBADTID)
                strlcat(buf, "rbadtid ", blen);
        if (err & INFINIPATH_E_RBADVERSION)
                strlcat(buf, "rbadversion ", blen);
        if (err & INFINIPATH_E_RHDR)
                strlcat(buf, "rhdr ", blen);
        if (err & INFINIPATH_E_RLONGPKTLEN)
                strlcat(buf, "rlongpktlen ", blen);
        if (err & INFINIPATH_E_RSHORTPKTLEN)
                strlcat(buf, "rshortpktlen ", blen);
        if (err & INFINIPATH_E_RMAXPKTLEN)
                strlcat(buf, "rmaxpktlen ", blen);
        if (err & INFINIPATH_E_RMINPKTLEN)
                strlcat(buf, "rminpktlen ", blen);
        if (err & INFINIPATH_E_RFORMATERR)
                strlcat(buf, "rformaterr ", blen);
        if (err & INFINIPATH_E_RUNSUPVL)
                strlcat(buf, "runsupvl ", blen);
        if (err & INFINIPATH_E_RUNEXPCHAR)
                strlcat(buf, "runexpchar ", blen);
        if (err & INFINIPATH_E_RIBFLOW)
                strlcat(buf, "ribflow ", blen);
        if (err & INFINIPATH_E_REBP)
                strlcat(buf, "EBP ", blen);
        if (err & INFINIPATH_E_SUNDERRUN)
                strlcat(buf, "sunderrun ", blen);
        if (err & INFINIPATH_E_SPIOARMLAUNCH)
                strlcat(buf, "spioarmlaunch ", blen);
        if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
                strlcat(buf, "sunexperrpktnum ", blen);
        if (err & INFINIPATH_E_SDROPPEDDATAPKT)
                strlcat(buf, "sdroppeddatapkt ", blen);
        if (err & INFINIPATH_E_SDROPPEDSMPPKT)
                strlcat(buf, "sdroppedsmppkt ", blen);
        if (err & INFINIPATH_E_SMAXPKTLEN)
                strlcat(buf, "smaxpktlen ", blen);
        if (err & INFINIPATH_E_SMINPKTLEN)
                strlcat(buf, "sminpktlen ", blen);
        if (err & INFINIPATH_E_SUNSUPVL)
                strlcat(buf, "sunsupVL ", blen);
        if (err & INFINIPATH_E_SPKTLEN)
                strlcat(buf, "spktlen ", blen);
        if (err & INFINIPATH_E_INVALIDADDR)
                strlcat(buf, "invalidaddr ", blen);
        if (err & INFINIPATH_E_RICRC)
                strlcat(buf, "CRC ", blen);
        if (err & INFINIPATH_E_RVCRC)
                strlcat(buf, "VCRC ", blen);
        if (err & INFINIPATH_E_RRCVEGRFULL)
                strlcat(buf, "rcvegrfull ", blen);
        if (err & INFINIPATH_E_RRCVHDRFULL)
                strlcat(buf, "rcvhdrfull ", blen);
        if (err & INFINIPATH_E_IBSTATUSCHANGED)
                strlcat(buf, "ibcstatuschg ", blen);
        if (err & INFINIPATH_E_RIBLOSTLINK)
                strlcat(buf, "riblostlink ", blen);
        if (err & INFINIPATH_E_HARDWARE)
                strlcat(buf, "hardware ", blen);
        if (err & INFINIPATH_E_RESET)
                strlcat(buf, "reset ", blen);
}

/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
        /* start empty so strlcat appends cleanly; stays empty if no errors */
        *msg = '\0';

        if (err & INFINIPATH_RHF_H_ICRCERR)
                strlcat(msg, "icrcerr ", len);
        if (err & INFINIPATH_RHF_H_VCRCERR)
                strlcat(msg, "vcrcerr ", len);
        if (err & INFINIPATH_RHF_H_PARITYERR)
                strlcat(msg, "parityerr ", len);
        if (err & INFINIPATH_RHF_H_LENERR)
                strlcat(msg, "lenerr ", len);
        if (err & INFINIPATH_RHF_H_MTUERR)
                strlcat(msg, "mtuerr ", len);
        if (err & INFINIPATH_RHF_H_IHDRERR)
                /* infinipath hdr checksum error */
                strlcat(msg, "ipathhdrerr ", len);
        if (err & INFINIPATH_RHF_H_TIDERR)
                strlcat(msg, "tiderr ", len);
        if (err & INFINIPATH_RHF_H_MKERR)
                /* bad port, offset, etc. */
                strlcat(msg, "invalid ipathhdr ", len);
        if (err & INFINIPATH_RHF_H_IBERR)
                strlcat(msg, "iberr ", len);
        if (err & INFINIPATH_RHF_L_SWA)
                strlcat(msg, "swA ", len);
        if (err & INFINIPATH_RHF_L_SWB)
                strlcat(msg, "swB ", len);
}

/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 * @err: unused
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
                                     int err)
{
        return dd->ipath_port0_skbs ?
                (void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
}

/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff GFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
                                gfp_t gfp_mask)
{
        struct sk_buff *skb;
        u32 len;

        /*
         * Only fully supported way to handle this is to allocate lots
         * extra, align as needed, and then do skb_reserve().  That wastes
         * a lot of memory...  I'll have to hack this into infinipath_copy
         * also.
         */

        /*
         * We need 4 extra bytes for unaligned transfer copying
         */
        if (dd->ipath_flags & IPATH_4BYTE_TID) {
                /* we need a 4KB multiple alignment, and there is no way
                 * to do it except to allocate extra and then skb_reserve
                 * enough to bring it up to the right alignment.
                 */
                len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
        } else
                len = dd->ipath_ibmaxlen + 4;
        skb = __dev_alloc_skb(len, gfp_mask);
        if (!skb) {
                ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
                              len);
                goto bail;
        }
        if (dd->ipath_flags & IPATH_4BYTE_TID) {
                u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
                if (una)
                        skb_reserve(skb, 4 + (1 << 11) - una);
                else
                        skb_reserve(skb, 4);
        } else
                skb_reserve(skb, 4);

bail:
        return skb;
}
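/*
 * Worked example of the alignment arithmetic above (illustrative
 * addresses): with (1 << 11) == 2048, suppose skb->data + 4 ends up at
 * 0x1234.  Then una = 0x1234 & 0x7ff = 0x234, and skb_reserve() moves
 * the data pointer forward by 4 + 2048 - 0x234 bytes, to 0x1800, the
 * next 2KB boundary.  The extra (1 << 11) - 1 bytes added to len
 * guarantee the buffer is large enough for this worst-case reserve.
 */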
/**
 * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
 * @dd: the infinipath device
 * @etail: the sk_buff number
 * @tlen: the total packet length
 * @hdr: the ethernet header
 *
 * Separate routine for better overall optimization
 */
static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
                            u32 tlen, struct ether_header *hdr)
{
        u32 elen;
        u8 pad, *bthbytes;
        struct sk_buff *skb, *nskb;

        if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
                /*
                 * Allocate a new sk_buff to replace the one we give
                 * to the network stack.
                 */
                nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
                if (!nskb) {
                        /* count OK packets that we drop */
                        ipath_stats.sps_krdrops++;
                        return;
                }

                bthbytes = (u8 *) hdr->bth;
                pad = (bthbytes[1] >> 4) & 3;
                /* +CRC32 */
                elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));

                skb = dd->ipath_port0_skbs[etail];
                dd->ipath_port0_skbs[etail] = nskb;
                skb_put(skb, elen);

                dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
                                    ((char __iomem *) dd->ipath_kregbase
                                     + dd->ipath_rcvegrbase), 0,
                                    virt_to_phys(nskb->data));

                __ipath_layer_rcv(dd, hdr, skb);

                /* another ether packet received */
                ipath_stats.sps_ether_rpkts++;
        } else if (hdr->sub_opcode == OPCODE_LID_ARP)
                __ipath_layer_rcv_lid(dd, hdr);
}

/*
 * ipath_kreceive - receive a packet
 * @dd: the infinipath device
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_devdata *dd)
{
        u64 *rc;
        void *ebuf;
        const u32 rsize = dd->ipath_rcvhdrentsize;      /* words */
        const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
        u32 etail = -1, l, hdrqtail;
        struct ips_message_header *hdr;
        u32 eflags, i, etype, tlen, pkttot = 0;
        static u64 totcalls;    /* stats, may eventually remove */
        char emsg[128];

        if (!dd->ipath_hdrqtailptr) {
                ipath_dev_err(dd,
                              "hdrqtailptr not set, can't do receives\n");
                goto bail;
        }

        /* There is already a thread processing this queue. */
        if (test_and_set_bit(0, &dd->ipath_rcv_pending))
                goto bail;

        if (dd->ipath_port0head ==
            (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
                goto done;

gotmore:
        /*
         * read only once at start.  If in flood situation, this helps
         * performance slightly.  If more arrive while we are processing,
         * we'll come back here and do them
         */
        hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);

        for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
                u32 qp;
                u8 *bthbytes;

                rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
                hdr = (struct ips_message_header *)&rc[1];
                /*
                 * could make a network order version of IPATH_KD_QP, and
                 * do the obvious shift before masking to speed this up.
                 */
                qp = ntohl(hdr->bth[1]) & 0xffffff;
                bthbytes = (u8 *) hdr->bth;

                eflags = ips_get_hdr_err_flags((__le32 *) rc);
                etype = ips_get_rcv_type((__le32 *) rc);
                /* total length */
                tlen = ips_get_length_in_bytes((__le32 *) rc);
                ebuf = NULL;
                if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
                        /*
                         * it turns out that the chip uses an eager buffer
                         * for all non-expected packets, whether it "needs"
                         * one or not.  So always get the index, but don't
                         * set ebuf (so we try to copy data) unless the
                         * length requires it.
                         */
                        etail = ips_get_index((__le32 *) rc);
                        if (tlen > sizeof(*hdr) ||
                            etype == RCVHQ_RCV_TYPE_NON_KD)
                                ebuf = ipath_get_egrbuf(dd, etail, 0);
                }

                /*
                 * both tiderr and ipathhdrerr are set for all plain IB
                 * packets; only ipathhdrerr should be set.
                 */

                if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
                    RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
                            hdr->iph.ver_port_tid_offset) !=
                    IPS_PROTO_VERSION) {
                        ipath_cdbg(PKT, "Bad InfiniPath protocol version "
                                   "%x\n", etype);
                }

                if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
                               INFINIPATH_RHF_H_IHDRERR)) {
                        get_rhf_errstring(eflags, emsg, sizeof emsg);
                        ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
                                   "tlen=%x opcode=%x egridx=%x: %s\n",
                                   eflags, l, etype, tlen, bthbytes[0],
                                   ips_get_index((__le32 *) rc), emsg);
                } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
                        int ret = __ipath_verbs_rcv(dd, rc + 1,
                                                    ebuf, tlen);
                        if (ret == -ENODEV)
                                ipath_cdbg(VERBOSE,
                                           "received IB packet, "
                                           "not SMA (QP=%x)\n", qp);
                } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
                        if (qp == IPATH_KD_QP &&
                            bthbytes[0] == ipath_layer_rcv_opcode &&
                            ebuf)
                                ipath_rcv_layer(dd, etail, tlen,
                                                (struct ether_header *)hdr);
                        else
                                ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
                                           "qp=%x), len %x; ignored\n",
                                           etype, bthbytes[0], qp, tlen);
                } else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
                        ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
                                  be32_to_cpu(hdr->bth[0]) & 0xff);
                else if (eflags & (INFINIPATH_RHF_H_TIDERR |
                                   INFINIPATH_RHF_H_IHDRERR)) {
                        /*
                         * This is a type 3 packet, only the LRH is in the
                         * rcvhdrq, the rest of the header is in the eager
                         * buffer.
                         */
                        u8 opcode;
                        if (ebuf) {
                                bthbytes = (u8 *) ebuf;
                                opcode = *bthbytes;
                        } else
                                opcode = 0;
                        get_rhf_errstring(eflags, emsg, sizeof emsg);
                        ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
                                  "len %x\n", eflags, emsg, opcode, etail,
                                  tlen);
                } else {
                        /*
                         * error packet, type of error unknown.
                         * Probably type 3, but we don't know, so don't
                         * even try to print the opcode, etc.
                         */
                        ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
                                  "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
                                  "hdr %llx %llx %llx %llx %llx\n",
                                  etail, tlen, (unsigned long) rc, l,
                                  (unsigned long long) rc[0],
                                  (unsigned long long) rc[1],
                                  (unsigned long long) rc[2],
                                  (unsigned long long) rc[3],
                                  (unsigned long long) rc[4],
                                  (unsigned long long) rc[5]);
                }
                l += rsize;
                if (l >= maxcnt)
                        l = 0;
                /*
                 * update for each packet, to help prevent overflows if we
                 * have lots of packets.
                 */
                (void)ipath_write_ureg(dd, ur_rcvhdrhead,
                                       dd->ipath_rhdrhead_intr_off | l, 0);
                if (etype != RCVHQ_RCV_TYPE_EXPECTED)
                        (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
                                               etail, 0);
        }

        pkttot += i;

        dd->ipath_port0head = l;

        if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
                /* more arrived while we handled first batch */
                goto gotmore;

        if (pkttot > ipath_stats.sps_maxpkts_call)
                ipath_stats.sps_maxpkts_call = pkttot;
        ipath_stats.sps_port0pkts += pkttot;
        ipath_stats.sps_avgpkts_call =
                ipath_stats.sps_port0pkts / ++totcalls;

done:
        clear_bit(0, &dd->ipath_rcv_pending);
        smp_mb__after_clear_bit();

bail:;
}

/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
 * @dd: the infinipath device
 *
 * called whenever our local copy indicates we have run out of send buffers
 * NOTE: This can be called from interrupt context by some code
 * and from non-interrupt context by ipath_getpiobuf().
 */
static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
        unsigned long flags;
        int i;
        const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

        /* If the generation (check) bits have changed, then we update the
         * busy bit for the corresponding PIO buffer.  This algorithm will
         * modify positions to the value they already have in some cases
         * (i.e., no change), but it's faster than changing only the bits
         * that have changed.
         *
         * We would like to do this atomically, to avoid spinlocks in the
         * critical send path, but that's not really possible, given the
         * type of changes, and that this routine could be called on
         * multiple cpu's simultaneously, so we lock in this routine only,
         * to avoid conflicting updates; all we change is the shadow, and
         * it's a single 64 bit memory location, so by definition the update
         * is atomic in terms of what other cpu's can see in testing the
         * bits.  The spin_lock overhead isn't too bad, since it only
         * happens when all buffers are in use, so only cpu overhead, not
         * latency or bandwidth is affected.
         */
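        /*
         * Bit pairing, for illustration: each buffer i owns two bits of
         * these 64-bit words; the even bit (2 * i, selected by
         * _IPATH_ALL_CHECKBITS below) is the generation "check" bit, and
         * its busy bit sits INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT above it.
         * In the loop below, pchg selects the buffers whose generation
         * bit in the chip's DMA copy agrees with the shadow (so the DMA
         * copy is current for them); only those buffers get their shadow
         * busy bits refreshed from the DMA copy.
         */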
#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL

        if (!dd->ipath_pioavailregs_dma) {
                ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
                return;
        }
        if (ipath_debug & __IPATH_VERBDBG) {
                /* only if packet debug and verbose */
                volatile __le64 *dma = dd->ipath_pioavailregs_dma;
                unsigned long *shadow = dd->ipath_pioavailshadow;

                ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
                           "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
                           "s3=%lx\n",
                           (unsigned long long) le64_to_cpu(dma[0]),
                           shadow[0],
                           (unsigned long long) le64_to_cpu(dma[1]),
                           shadow[1],
                           (unsigned long long) le64_to_cpu(dma[2]),
                           shadow[2],
                           (unsigned long long) le64_to_cpu(dma[3]),
                           shadow[3]);
                if (piobregs > 4)
                        ipath_cdbg(
                                PKT, "2nd group, dma4=%llx shad4=%lx, "
                                "d5=%llx s5=%lx, d6=%llx s6=%lx, "
                                "d7=%llx s7=%lx\n",
                                (unsigned long long) le64_to_cpu(dma[4]),
                                shadow[4],
                                (unsigned long long) le64_to_cpu(dma[5]),
                                shadow[5],
                                (unsigned long long) le64_to_cpu(dma[6]),
                                shadow[6],
                                (unsigned long long) le64_to_cpu(dma[7]),
                                shadow[7]);
        }
        spin_lock_irqsave(&ipath_pioavail_lock, flags);
        for (i = 0; i < piobregs; i++) {
                u64 pchbusy, pchg, piov, pnew;
                /*
                 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
                 */
                if (i > 3) {
                        if (i & 1)
                                piov = le64_to_cpu(
                                        dd->ipath_pioavailregs_dma[i - 1]);
                        else
                                piov = le64_to_cpu(
                                        dd->ipath_pioavailregs_dma[i + 1]);
                } else
                        piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
                pchg = _IPATH_ALL_CHECKBITS &
                        ~(dd->ipath_pioavailshadow[i] ^ piov);
                pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
                if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
                        pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
                        pnew |= piov & pchbusy;
                        dd->ipath_pioavailshadow[i] = pnew;
                }
        }
        spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * called from user init code, and also layered driver init
 */
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
        int ret = 0;

        if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
                if (dd->ipath_rcvhdrsize != rhdrsize) {
                        dev_info(&dd->pcidev->dev,
                                 "Error: can't set protocol header "
                                 "size %u, already %u\n",
                                 rhdrsize, dd->ipath_rcvhdrsize);
                        ret = -EAGAIN;
                } else
                        ipath_cdbg(VERBOSE, "Reuse same protocol header "
                                   "size %u\n", dd->ipath_rcvhdrsize);
        } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
                               (sizeof(u64) / sizeof(u32)))) {
                ipath_dbg("Error: can't set protocol header size %u "
                          "(> max %u)\n", rhdrsize,
                          dd->ipath_rcvhdrentsize -
                          (u32) (sizeof(u64) / sizeof(u32)));
                ret = -EOVERFLOW;
        } else {
                dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
                dd->ipath_rcvhdrsize = rhdrsize;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
                                 dd->ipath_rcvhdrsize);
                ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
                           dd->ipath_rcvhdrsize);
        }
        return ret;
}

/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @pbufnum: the buffer number is placed here
 *
 * do appropriate marking as busy, etc.
 * returns buffer number if one found (>=0), negative number is error.
 * Used by ipath_sma_send_pkt and ipath_layer_send
 */
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 *pbufnum)
{
        int i, j, starti, updated = 0;
        unsigned piobcnt, iter;
        unsigned long flags;
        unsigned long *shadow = dd->ipath_pioavailshadow;
        u32 __iomem *buf;

        piobcnt = (unsigned)(dd->ipath_piobcnt2k
                             + dd->ipath_piobcnt4k);
        starti = dd->ipath_lastport_piobuf;
        iter = piobcnt - starti;
        if (dd->ipath_upd_pio_shadow) {
                /*
                 * Minor optimization.  If we had no buffers on last call,
                 * start out by doing the update; continue and do scan even
                 * if no buffers were updated, to be paranoid
                 */
                ipath_update_pio_bufs(dd);
                /* we scanned here, don't do it at end of scan */
                updated = 1;
                i = starti;
        } else
                i = dd->ipath_lastpioindex;

rescan:
        /*
         * while test_and_set_bit() is atomic, we do that and then the
         * change_bit(), and the pair is not.  See if this is the cause
         * of the remaining armlaunch errors.
         */
        spin_lock_irqsave(&ipath_pioavail_lock, flags);
        for (j = 0; j < iter; j++, i++) {
                if (i >= piobcnt)
                        i = starti;
                /*
                 * To avoid bus lock overhead, we first find a candidate
                 * buffer, then do the test and set, and continue if that
                 * fails.
                 */
                if (test_bit((2 * i) + 1, shadow) ||
                    test_and_set_bit((2 * i) + 1, shadow))
                        continue;
                /* flip generation bit */
                change_bit(2 * i, shadow);
                break;
        }
        spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

        if (j == iter) {
                volatile __le64 *dma = dd->ipath_pioavailregs_dma;

                /*
                 * first time through; shadow exhausted, but may be real
                 * buffers available, so go see; if any updated, rescan
                 * (once)
                 */
                if (!updated) {
                        ipath_update_pio_bufs(dd);
                        updated = 1;
                        i = starti;
                        goto rescan;
                }
                dd->ipath_upd_pio_shadow = 1;
                /*
                 * not atomic, but if we lose one once in a while, that's OK
                 */
                ipath_stats.sps_nopiobufs++;
                if (!(++dd->ipath_consec_nopiobuf % 100000)) {
                        ipath_dbg(
                                "%u pio sends with no bufavail; dmacopy: "
                                "%llx %llx %llx %llx; shadow: "
                                "%lx %lx %lx %lx\n",
                                dd->ipath_consec_nopiobuf,
                                (unsigned long long) le64_to_cpu(dma[0]),
                                (unsigned long long) le64_to_cpu(dma[1]),
                                (unsigned long long) le64_to_cpu(dma[2]),
                                (unsigned long long) le64_to_cpu(dma[3]),
                                shadow[0], shadow[1], shadow[2],
                                shadow[3]);
                        /*
                         * 4 buffers per byte, 4 registers above, cover rest
                         * below
                         */
                        if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
                            (sizeof(shadow[0]) * 4 * 4))
                                ipath_dbg("2nd group: dmacopy: %llx %llx "
                                          "%llx %llx; shadow: %lx %lx "
                                          "%lx %lx\n",
                                          (unsigned long long)
                                          le64_to_cpu(dma[4]),
                                          (unsigned long long)
                                          le64_to_cpu(dma[5]),
                                          (unsigned long long)
                                          le64_to_cpu(dma[6]),
                                          (unsigned long long)
                                          le64_to_cpu(dma[7]),
                                          shadow[4], shadow[5],
                                          shadow[6], shadow[7]);
                }
                buf = NULL;
                goto bail;
        }

        if (updated)
                /*
                 * ran out of bufs, now some (at least this one we just
                 * got) are now available, so tell the layered driver.
                 */
                __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);

        /*
         * set next starting place.  Since it's just an optimization,
         * it doesn't matter who wins on this, so no locking
         */
        dd->ipath_lastpioindex = i + 1;
        if (dd->ipath_upd_pio_shadow)
                dd->ipath_upd_pio_shadow = 0;
        if (dd->ipath_consec_nopiobuf)
                dd->ipath_consec_nopiobuf = 0;
        if (i < dd->ipath_piobcnt2k)
                buf = (u32 __iomem *) (dd->ipath_pio2kbase +
                                       i * dd->ipath_palign);
        else
                buf = (u32 __iomem *)
                        (dd->ipath_pio4kbase +
                         (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
        ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
                   i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
        if (pbufnum)
                *pbufnum = i;

bail:
        return buf;
}
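/*
 * Address arithmetic above, for illustration: assuming ipath_palign is
 * the 2KB buffer stride, buffer i < ipath_piobcnt2k lives at
 * ipath_pio2kbase + i * ipath_palign, while buffer i >= ipath_piobcnt2k
 * lives at ipath_pio4kbase + (i - ipath_piobcnt2k) * ipath_4kalign.  A
 * single flat index thus covers both buffer pools, and the availability
 * shadow is indexed the same way (bits 2 * i and 2 * i + 1).
 */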
/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * this must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
                         struct ipath_portdata *pd)
{
        int ret = 0;

        if (!pd->port_rcvhdrq) {
                dma_addr_t phys_hdrqtail;
                gfp_t gfp_flags = GFP_USER | __GFP_COMP;
                int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
                                sizeof(u32), PAGE_SIZE);

                pd->port_rcvhdrq = dma_alloc_coherent(
                        &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
                        gfp_flags);

                if (!pd->port_rcvhdrq) {
                        ipath_dev_err(dd, "attempt to allocate %d bytes "
                                      "for port %u rcvhdrq failed\n",
                                      amt, pd->port_port);
                        ret = -ENOMEM;
                        goto bail;
                }
                pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
                        &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
                        GFP_KERNEL);
                if (!pd->port_rcvhdrtail_kvaddr) {
                        ipath_dev_err(dd, "attempt to allocate 1 page "
                                      "for port %u rcvhdrqtailaddr failed\n",
                                      pd->port_port);
                        ret = -ENOMEM;
                        goto bail;
                }
                pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;

                pd->port_rcvhdrq_size = amt;

                ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
                           "for port %u rcvhdr Q\n",
                           amt >> PAGE_SHIFT, pd->port_rcvhdrq,
                           (unsigned long) pd->port_rcvhdrq_phys,
                           (unsigned long) pd->port_rcvhdrq_size,
                           pd->port_port);

                ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
                           pd->port_port,
                           (unsigned long long) phys_hdrqtail);
        } else
                ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
                           "hdrtailaddr@%p %llx physical\n",
                           pd->port_port, pd->port_rcvhdrq,
                           pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
                           (unsigned long long)pd->port_rcvhdrqtailaddr_phys);

        /* clear for security and sanity on each use */
        memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
        memset((void *)pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);

        /*
         * tell chip each time we init it, even if we are re-using previous
         * memory (we zero the register at process close)
         */
        ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
                              pd->port_port, pd->port_rcvhdrqtailaddr_phys);
        ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
                              pd->port_port, pd->port_rcvhdrq_phys);

        ret = 0;

bail:
        return ret;
}
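/*
 * Sizing example with illustrative numbers: for rcvhdrcnt == 512
 * entries of rcvhdrentsize == 16 32-bit words each, the queue needs
 * 512 * 16 * sizeof(u32) = 32768 bytes, and ALIGN(..., PAGE_SIZE)
 * rounds that up to a whole number of pages for dma_alloc_coherent().
 */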
int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
                           u64 bits_to_wait_for, u64 *valp)
{
        unsigned long timeout;
        u64 lastval, val;
        int ret;

        lastval = ipath_read_kreg64(dd, reg_id);
        /* wait a ridiculously long time */
        timeout = jiffies + msecs_to_jiffies(5);
        do {
                val = ipath_read_kreg64(dd, reg_id);
                /* set so they have something, even on failures. */
                *valp = val;
                if ((val & bits_to_wait_for) == bits_to_wait_for) {
                        ret = 0;
                        break;
                }
                if (val != lastval)
                        ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
                                   "waiting for %llx bits\n",
                                   (unsigned long long) lastval,
                                   (unsigned long long) val,
                                   (unsigned long long) bits_to_wait_for);
                cond_resched();
                if (time_after(jiffies, timeout)) {
                        ipath_dbg("Didn't get bits %llx in register 0x%x, "
                                  "got %llx\n",
                                  (unsigned long long) bits_to_wait_for,
                                  reg_id, (unsigned long long) *valp);
                        ret = -ENODEV;
                        break;
                }
        } while (1);

        return ret;
}
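/*
 * Usage sketch (the register choice and SOME_DONE_BIT are hypothetical):
 *
 *      u64 val;
 *      ret = ipath_waitfor_complete(dd, dd->ipath_kregs->kr_scratch,
 *                                   SOME_DONE_BIT, &val);
 *
 * This polls the register for up to ~5ms; on timeout it returns
 * -ENODEV, and *valp holds the last value read either way.
 */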
  1285. /**
  1286. * ipath_waitfor_mdio_cmdready - wait for last command to complete
  1287. * @dd: the infinipath device
  1288. *
  1289. * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
  1290. * away indicating the last command has completed. It doesn't return data
  1291. */
  1292. int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
  1293. {
  1294. unsigned long timeout;
  1295. u64 val;
  1296. int ret;
  1297. /* wait a ridiculously long time */
  1298. timeout = jiffies + msecs_to_jiffies(5);
  1299. do {
  1300. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
  1301. if (!(val & IPATH_MDIO_CMDVALID)) {
  1302. ret = 0;
  1303. break;
  1304. }
  1305. cond_resched();
  1306. if (time_after(jiffies, timeout)) {
  1307. ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
  1308. (unsigned long long) val);
  1309. ret = -ENODEV;
  1310. break;
  1311. }
  1312. } while (1);
  1313. return ret;
  1314. }
  1315. void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
  1316. {
  1317. static const char *what[4] = {
  1318. [0] = "DOWN",
  1319. [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
  1320. [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
  1321. [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
  1322. };
  1323. int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
  1324. INFINIPATH_IBCC_LINKCMD_MASK;
  1325. ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
  1326. "is %s\n", dd->ipath_unit,
  1327. what[linkcmd],
  1328. ipath_ibcstatus_str[
  1329. (ipath_read_kreg64
  1330. (dd, dd->ipath_kregs->kr_ibcstatus) >>
  1331. INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
  1332. INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
  1333. /* flush all queued sends when going to DOWN or INIT, to be sure that
  1334. * they don't block SMA and other MAD packets */
  1335. if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
  1336. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
  1337. INFINIPATH_S_ABORT);
  1338. ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
  1339. (unsigned)(dd->ipath_piobcnt2k +
  1340. dd->ipath_piobcnt4k) -
  1341. dd->ipath_lastport_piobuf);
  1342. }
  1343. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1344. dd->ipath_ibcctrl | which);
  1345. }
/**
 * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to read
 * @port: the port containing the register
 *
 * Registers that are replicated per port (as reflected in the chip
 * implementation constants) use this routine.
 */
u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	return ipath_read_kreg64(dd, where);
}
/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Registers that are replicated per port (as reflected in the chip
 * implementation constants) use this routine.
 */
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}
/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by ipath_init_chip(dd,1).
 */
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	u64 val;

	ipath_dbg("Shutting down the device\n");

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	/* mask interrupts, but not errors */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	/*
	 * gracefully stop all sends, allowing any in progress to trickle
	 * out first.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
	/* flush it */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	/*
	 * enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(5);

	/*
	 * abort any armed or launched PIO buffers that didn't go
	 * (self-clearing).  Will cause any packet currently being
	 * transmitted to go out with an EBP, and may also cause a short
	 * packet error on the receiver.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 INFINIPATH_S_ABORT);

	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);

	/*
	 * we are shutting down, so tell the layered driver.  We don't do
	 * this on just a link state change; much as with ethernet, a cable
	 * unplug etc. doesn't change driver state.
	 */
	ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);

	/* disable IBC */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	/*
	 * clear SerdesEnable and turn the LEDs off; do this here because
	 * we are unloading, so don't count on interrupts to move things
	 * along.  Turn the LEDs off explicitly for the same reason.
	 */
	dd->ipath_f_quiet_serdes(dd);
	dd->ipath_f_setextled(dd, 0, 0);

	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}

	/*
	 * clear all interrupts and errors, so that the next time the driver
	 * is loaded or the device is enabled, we know that whatever is set
	 * happened while we were unloaded
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
}
/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * free up any allocated data for a port
 * This should not touch anything that would affect a simultaneous
 * re-allocation of port data, because it is called after ipath_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 * (The only exception to global state is freeing the port0 port0_skbs.)
 */
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	if (!pd)
		return;

	if (pd->port_rcvhdrq) {
		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_size);
		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
		pd->port_rcvhdrq = NULL;
		if (pd->port_rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *) pd->port_rcvhdrtail_kvaddr,
					  pd->port_rcvhdrqtailaddr_phys);
			pd->port_rcvhdrtail_kvaddr = NULL;
		}
	}
	if (pd->port_port && pd->port_rcvegrbuf) {
		unsigned e;

		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
			void *base = pd->port_rcvegrbuf[e];
			size_t size = pd->port_rcvegrbuf_size;

			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
				   "chunk %u/%u\n", base,
				   (unsigned long) size,
				   e, pd->port_rcvegrbuf_chunks);
			dma_free_coherent(&dd->pcidev->dev, size,
					  base, pd->port_rcvegrbuf_phys[e]);
		}
		vfree(pd->port_rcvegrbuf);
		pd->port_rcvegrbuf = NULL;
		vfree(pd->port_rcvegrbuf_phys);
		pd->port_rcvegrbuf_phys = NULL;
		pd->port_rcvegrbuf_chunks = 0;
	} else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
		unsigned e;
		struct sk_buff **skbs = dd->ipath_port0_skbs;

		dd->ipath_port0_skbs = NULL;
		ipath_cdbg(VERBOSE, "free closed port %d ipath_port0_skbs "
			   "@ %p\n", pd->port_port, skbs);
		for (e = 0; e < dd->ipath_rcvegrcnt; e++)
			if (skbs[e])
				dev_kfree_skb(skbs[e]);
		vfree(skbs);
	}
	kfree(pd->port_tid_pg_list);
	kfree(pd);
}
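
/*
 * Module load: initialize the unit-number idr table, then register
 * with the PCI core and create the driver sysfs group and ipathfs,
 * unwinding in reverse order through the bail_* labels on failure.
 */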
static int __init infinipath_init(void)
{
	int ret;

	ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_driver_create_group(&ipath_driver.driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
		       "sysfs entries: error %d\n", -ret);
		goto bail_pci;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_group;
	}

	goto bail;

bail_group:
	ipath_driver_remove_group(&ipath_driver.driver);

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}
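
/*
 * Per-device teardown at unload: quiesce the chip, invalidate the
 * register mappings, free DMA buffers, release any expTID pages still
 * pinned, and free all per-port data; roughly the inverse of device
 * init.
 */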
static void cleanup_device(struct ipath_devdata *dd)
{
	int port;

	ipath_shutdown_device(dd);

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * if we haven't already cleaned up, clear these
			 * now to ensure any register reads/writes "fail"
			 * until re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		vfree(dd->ipath_pageshadow);
		dd->ipath_pageshadow = NULL;
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do this for portcnt, not cfgports, because cfgports
	 * could have changed while we were loaded.
	 */
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = dd->ipath_pd[port];
		dd->ipath_pd[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(dd->ipath_pd);
	/*
	 * for debuggability, in case some cleanup path tries to use it
	 * after this
	 */
	dd->ipath_pd = NULL;
}
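
/*
 * Module unload: remove ipathfs and the driver sysfs group first, then
 * clean up each device still on ipath_dev_list before unregistering
 * from the PCI core and destroying the unit idr table.
 */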
static void __exit infinipath_cleanup(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	ipath_exit_ipathfs();
	ipath_driver_remove_group(&ipath_driver.driver);

	spin_lock_irqsave(&ipath_devs_lock, flags);

	/*
	 * turn off rcv, send, and interrupts for all ports; should all
	 * drivers also hard reset the chip here?
	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs,
	 * for all versions of the driver, if they were allocated
	 */
	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
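		/*
		 * can't hold the spinlock across the body of the loop:
		 * cleanup_device() and free_irq() can sleep
		 */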
		spin_unlock_irqrestore(&ipath_devs_lock, flags);

		if (dd->ipath_kregbase)
			cleanup_device(dd);

		if (dd->pcidev) {
			if (dd->pcidev->irq) {
				ipath_cdbg(VERBOSE,
					   "unit %u free_irq of irq %x\n",
					   dd->ipath_unit, dd->pcidev->irq);
				free_irq(dd->pcidev->irq, dd);
			} else
				ipath_dbg("irq is 0, not doing free_irq "
					  "for unit %u\n", dd->ipath_unit);

			/*
			 * we check for NULL here, because it's outside
			 * the kregbase check, and we need to call it
			 * after the free_irq.  Thus it's possible that
			 * the function pointers were never initialized.
			 */
			if (dd->ipath_f_cleanup)
				/* clean up chip-specific stuff */
				dd->ipath_f_cleanup(dd);

			dd->pcidev = NULL;
		}
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}
/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user ports are open that use chip
 * resources.
 */
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	if (dd->ipath_pd)
		for (i = 1; i < dd->ipath_cfgports; i++) {
			if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
				ipath_dbg("unit %u port %d is in use "
					  "(PID %u cmd %s), can't reset\n",
					  unit, i,
					  dd->ipath_pd[i]->port_pid,
					  dd->ipath_pd[i]->port_comm);
				ret = -EBUSY;
				goto bail;
			}
		}

	dd->ipath_flags &= ~IPATH_INITTED;
	ret = dd->ipath_f_reset(dd);
	if (ret != 1)
		ipath_dbg("reset was not successful\n");
	ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
		  unit);
	ret = ipath_init_chip(dd, 1);
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}
module_init(infinipath_init);
module_exit(infinipath_cleanup);