ipath_init_chip.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951
  1. /*
  2. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/pci.h>
  33. #include <linux/netdevice.h>
  34. #include <linux/vmalloc.h>
  35. #include "ipath_kernel.h"
  36. #include "ips_common.h"
/*
 * min buffers we want to have per port, after driver
 */
#define IPATH_MIN_USER_PORT_BUFCNT 8

/*
 * Number of ports we are configured to use (to allow for more pio
 * buffers per port, etc.)  Zero means use chip value.
 */
static ushort ipath_cfgports;

module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgports, "Set max number of ports to use");

/*
 * Number of buffers reserved for driver (layered drivers and SMA
 * send).  Reserved at end of buffer list.
 */
static ushort ipath_kpiobufs = 32;

/*
 * Forward declaration: custom "set" hook so runtime writes to the
 * kpiobufs parameter can be validated (implementation not shown in
 * this chunk of the file).
 */
static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint,
		  &ipath_kpiobufs, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
  57. /**
  58. * create_port0_egr - allocate the eager TID buffers
  59. * @dd: the infinipath device
  60. *
  61. * This code is now quite different for user and kernel, because
  62. * the kernel uses skb's, for the accelerated network performance.
  63. * This is the kernel (port0) version.
  64. *
  65. * Allocate the eager TID buffers and program them into infinipath.
  66. * We use the network layer alloc_skb() allocator to allocate the
  67. * memory, and either use the buffers as is for things like SMA
  68. * packets, or pass the buffers up to the ipath layered driver and
  69. * thence the network layer, replacing them as we do so (see
  70. * ipath_rcv_layer()).
  71. */
  72. static int create_port0_egr(struct ipath_devdata *dd)
  73. {
  74. unsigned e, egrcnt;
  75. struct sk_buff **skbs;
  76. int ret;
  77. egrcnt = dd->ipath_rcvegrcnt;
  78. skbs = vmalloc(sizeof(*dd->ipath_port0_skbs) * egrcnt);
  79. if (skbs == NULL) {
  80. ipath_dev_err(dd, "allocation error for eager TID "
  81. "skb array\n");
  82. ret = -ENOMEM;
  83. goto bail;
  84. }
  85. for (e = 0; e < egrcnt; e++) {
  86. /*
  87. * This is a bit tricky in that we allocate extra
  88. * space for 2 bytes of the 14 byte ethernet header.
  89. * These two bytes are passed in the ipath header so
  90. * the rest of the data is word aligned. We allocate
  91. * 4 bytes so that the data buffer stays word aligned.
  92. * See ipath_kreceive() for more details.
  93. */
  94. skbs[e] = ipath_alloc_skb(dd, GFP_KERNEL);
  95. if (!skbs[e]) {
  96. ipath_dev_err(dd, "SKB allocation error for "
  97. "eager TID %u\n", e);
  98. while (e != 0)
  99. dev_kfree_skb(skbs[--e]);
  100. ret = -ENOMEM;
  101. goto bail;
  102. }
  103. }
  104. /*
  105. * After loop above, so we can test non-NULL to see if ready
  106. * to use at receive, etc.
  107. */
  108. dd->ipath_port0_skbs = skbs;
  109. for (e = 0; e < egrcnt; e++) {
  110. unsigned long phys =
  111. virt_to_phys(dd->ipath_port0_skbs[e]->data);
  112. dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
  113. ((char __iomem *) dd->ipath_kregbase +
  114. dd->ipath_rcvegrbase), 0, phys);
  115. }
  116. ret = 0;
  117. bail:
  118. return ret;
  119. }
/*
 * bringup_link - program the IB link controller (IBC) and SerDes so
 * the link can start training.  Holds the IBC in reset while the
 * control register is composed, writes it with LINKINITCMD_DISABLE
 * (NOP link commands), brings up the SerDes via the chip-specific
 * hook, and only then re-enables the IBC.
 *
 * Returns 0 on success, or the nonzero result of the chip-specific
 * ipath_f_bringup_serdes() hook on failure.
 */
static int bringup_link(struct ipath_devdata *dd)
{
	u64 val, ibc;
	int ret = 0;

	/* hold IBC in reset */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control);

	/*
	 * Note that prior to try 14 or 15 of IB, the credit scaling
	 * wasn't working, because it was swapped for writes with the
	 * 1 bit default linkstate field
	 */

	/* ignore pbc and align word */
	val = dd->ipath_piosize2k - 2 * sizeof(u32);
	/*
	 * for ICRC, which we only send in diag test pkt mode, and we
	 * don't need to worry about that for mtu
	 */
	val += 1;
	/*
	 * Set the IBC maxpktlength to the size of our pio buffers the
	 * maxpktlength is in words.  This is *not* the IB data MTU.
	 */
	ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
	/* in KB */
	ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
	/*
	 * How often flowctrl sent.  More or less in usecs; balance against
	 * watermark value, so that in theory senders always get a flow
	 * control update in time to not let the IB link go idle.
	 */
	ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
	/* max error tolerance */
	ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
	/* use "real" buffer space for */
	ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
	/* IB credit flow control. */
	ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
	/* initially come up waiting for TS1, without sending anything. */
	dd->ipath_ibcctrl = ibc;
	/*
	 * Want to start out with both LINKCMD and LINKINITCMD in NOP
	 * (0 and 0).  Don't put linkinitcmd in ipath_ibcctrl, want that
	 * to stay a NOP
	 */
	ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
		INFINIPATH_IBCC_LINKINITCMD_SHIFT;
	ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
		   (unsigned long long) ibc);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);

	/* be sure chip saw it (read of scratch flushes the posted write) */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

	ret = dd->ipath_f_bringup_serdes(dd);

	if (ret)
		dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
			 "not usable\n");
	else {
		/* enable IBC */
		dd->ipath_control |= INFINIPATH_C_LINKENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
				 dd->ipath_control);
	}

	return ret;
}
  185. static int init_chip_first(struct ipath_devdata *dd,
  186. struct ipath_portdata **pdp)
  187. {
  188. struct ipath_portdata *pd = NULL;
  189. int ret = 0;
  190. u64 val;
  191. /*
  192. * skip cfgports stuff because we are not allocating memory,
  193. * and we don't want problems if the portcnt changed due to
  194. * cfgports. We do still check and report a difference, if
  195. * not same (should be impossible).
  196. */
  197. dd->ipath_portcnt =
  198. ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
  199. if (!ipath_cfgports)
  200. dd->ipath_cfgports = dd->ipath_portcnt;
  201. else if (ipath_cfgports <= dd->ipath_portcnt) {
  202. dd->ipath_cfgports = ipath_cfgports;
  203. ipath_dbg("Configured to use %u ports out of %u in chip\n",
  204. dd->ipath_cfgports, dd->ipath_portcnt);
  205. } else {
  206. dd->ipath_cfgports = dd->ipath_portcnt;
  207. ipath_dbg("Tried to configured to use %u ports; chip "
  208. "only supports %u\n", ipath_cfgports,
  209. dd->ipath_portcnt);
  210. }
  211. dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports,
  212. GFP_KERNEL);
  213. if (!dd->ipath_pd) {
  214. ipath_dev_err(dd, "Unable to allocate portdata array, "
  215. "failing\n");
  216. ret = -ENOMEM;
  217. goto done;
  218. }
  219. dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
  220. * dd->ipath_cfgports,
  221. GFP_KERNEL);
  222. dd->ipath_lastrcvhdrqtails =
  223. kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
  224. * dd->ipath_cfgports, GFP_KERNEL);
  225. if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
  226. ipath_dev_err(dd, "Unable to allocate head arrays, "
  227. "failing\n");
  228. ret = -ENOMEM;
  229. goto done;
  230. }
  231. dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL);
  232. if (!dd->ipath_pd[0]) {
  233. ipath_dev_err(dd, "Unable to allocate portdata for port "
  234. "0, failing\n");
  235. ret = -ENOMEM;
  236. goto done;
  237. }
  238. pd = dd->ipath_pd[0];
  239. pd->port_dd = dd;
  240. pd->port_port = 0;
  241. pd->port_cnt = 1;
  242. /* The port 0 pkey table is used by the layer interface. */
  243. pd->port_pkeys[0] = IPS_DEFAULT_P_KEY;
  244. dd->ipath_rcvtidcnt =
  245. ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
  246. dd->ipath_rcvtidbase =
  247. ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
  248. dd->ipath_rcvegrcnt =
  249. ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
  250. dd->ipath_rcvegrbase =
  251. ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
  252. dd->ipath_palign =
  253. ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
  254. dd->ipath_piobufbase =
  255. ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
  256. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
  257. dd->ipath_piosize2k = val & ~0U;
  258. dd->ipath_piosize4k = val >> 32;
  259. dd->ipath_ibmtu = 4096; /* default to largest legal MTU */
  260. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
  261. dd->ipath_piobcnt2k = val & ~0U;
  262. dd->ipath_piobcnt4k = val >> 32;
  263. dd->ipath_pio2kbase =
  264. (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
  265. (dd->ipath_piobufbase & 0xffffffff));
  266. if (dd->ipath_piobcnt4k) {
  267. dd->ipath_pio4kbase = (u32 __iomem *)
  268. (((char __iomem *) dd->ipath_kregbase) +
  269. (dd->ipath_piobufbase >> 32));
  270. /*
  271. * 4K buffers take 2 pages; we use roundup just to be
  272. * paranoid; we calculate it once here, rather than on
  273. * ever buf allocate
  274. */
  275. dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
  276. dd->ipath_palign);
  277. ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
  278. "(%x aligned)\n",
  279. dd->ipath_piobcnt2k, dd->ipath_piosize2k,
  280. dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
  281. dd->ipath_piosize4k, dd->ipath_pio4kbase,
  282. dd->ipath_4kalign);
  283. }
  284. else ipath_dbg("%u 2k piobufs @ %p\n",
  285. dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
  286. spin_lock_init(&dd->ipath_tid_lock);
  287. done:
  288. *pdp = pd;
  289. return ret;
  290. }
  291. /**
  292. * init_chip_reset - re-initialize after a reset, or enable
  293. * @dd: the infinipath device
  294. * @pdp: output for port data
  295. *
  296. * sanity check at least some of the values after reset, and
  297. * ensure no receive or transmit (explictly, in case reset
  298. * failed
  299. */
  300. static int init_chip_reset(struct ipath_devdata *dd,
  301. struct ipath_portdata **pdp)
  302. {
  303. struct ipath_portdata *pd;
  304. u32 rtmp;
  305. *pdp = pd = dd->ipath_pd[0];
  306. /* ensure chip does no sends or receives while we re-initialize */
  307. dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
  308. ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
  309. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
  310. ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
  311. rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
  312. if (dd->ipath_portcnt != rtmp)
  313. dev_info(&dd->pcidev->dev, "portcnt was %u before "
  314. "reset, now %u, using original\n",
  315. dd->ipath_portcnt, rtmp);
  316. rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
  317. if (rtmp != dd->ipath_rcvtidcnt)
  318. dev_info(&dd->pcidev->dev, "tidcnt was %u before "
  319. "reset, now %u, using original\n",
  320. dd->ipath_rcvtidcnt, rtmp);
  321. rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
  322. if (rtmp != dd->ipath_rcvtidbase)
  323. dev_info(&dd->pcidev->dev, "tidbase was %u before "
  324. "reset, now %u, using original\n",
  325. dd->ipath_rcvtidbase, rtmp);
  326. rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
  327. if (rtmp != dd->ipath_rcvegrcnt)
  328. dev_info(&dd->pcidev->dev, "egrcnt was %u before "
  329. "reset, now %u, using original\n",
  330. dd->ipath_rcvegrcnt, rtmp);
  331. rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
  332. if (rtmp != dd->ipath_rcvegrbase)
  333. dev_info(&dd->pcidev->dev, "egrbase was %u before "
  334. "reset, now %u, using original\n",
  335. dd->ipath_rcvegrbase, rtmp);
  336. return 0;
  337. }
/*
 * init_pioavailregs - allocate the DMA page the chip updates with
 * PIO-buffer-available bits, and carve the same page up for the
 * status word and freeze-message buffer exposed to applications.
 *
 * Returns 0 on success, -ENOMEM if the coherent page cannot be
 * allocated, -ENODEV if this unit's slice would overrun the shared
 * port 0 rcvhdrtail buffer.
 */
static int init_pioavailregs(struct ipath_devdata *dd)
{
	int ret;

	dd->ipath_pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dev_err(dd, "failed to allocate PIOavail reg area "
			      "in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * we really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	/*
	 * NOTE(review): "& ~L1_CACHE_BYTES" clears only one bit; an
	 * align-down mask would be "& ~(L1_CACHE_BYTES - 1)", so the
	 * resulting pointer is not necessarily cache-line aligned.
	 * Left as-is: other code may compute this offset the same
	 * way — confirm before changing.
	 */
	dd->ipath_statusp = (u64 *)
		((char *)dd->ipath_pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* copy the current value now that it's really allocated */
	*dd->ipath_statusp = dd->_ipath_status;
	/*
	 * setup buffer to hold freeze msg, accessible to apps,
	 * following statusp
	 */
	dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
	/* and its length */
	dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);

	/* each unit gets a 64-byte slice of the shared rcvhdrtail buffer */
	if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) {
		ipath_dev_err(dd, "unit %u too large for port 0 "
			      "rcvhdrtail buffer size\n", dd->ipath_unit);
		ret = -ENODEV;
	} else
		ret = 0;

	/* so we can get current tail in ipath_kreceive(), per chip */
	dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[
		dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];

done:
	return ret;
}
  380. /**
  381. * init_shadow_tids - allocate the shadow TID array
  382. * @dd: the infinipath device
  383. *
  384. * allocate the shadow TID array, so we can ipath_munlock previous
  385. * entries. It may make more sense to move the pageshadow to the
  386. * port data structure, so we only allocate memory for ports actually
  387. * in use, since we at 8k per port, now.
  388. */
  389. static void init_shadow_tids(struct ipath_devdata *dd)
  390. {
  391. dd->ipath_pageshadow = (struct page **)
  392. vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
  393. sizeof(struct page *));
  394. if (!dd->ipath_pageshadow)
  395. ipath_dev_err(dd, "failed to allocate shadow page * "
  396. "array, no expected sends!\n");
  397. else
  398. memset(dd->ipath_pageshadow, 0,
  399. dd->ipath_cfgports * dd->ipath_rcvtidcnt *
  400. sizeof(struct page *));
  401. }
/*
 * enable_chip - turn on PIO send and port 0 receive, mark the device
 * initialized and present, and prime the head/tail shadow state.
 * @dd: the infinipath device
 * @pd: port 0 data (not referenced in this function)
 * @reinit: nonzero when re-initializing after a reset; skips one-time
 *	waitqueue setup
 */
static void enable_chip(struct ipath_devdata *dd,
			struct ipath_portdata *pd, int reinit)
{
	u32 val;
	int i;

	if (!reinit) {
		/* one-time setup of the SMA state-change waitqueue */
		init_waitqueue_head(&ipath_sma_state_wait);
	}

	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	/* Enable PIO send, and update of PIOavail regs to memory. */
	dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
		INFINIPATH_S_PIOBUFAVAILUPD;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	/*
	 * enable port 0 receive, and receive interrupt.  other ports
	 * done as user opens and inits them.
	 */
	dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD |
		(1ULL << INFINIPATH_R_PORTENABLE_SHIFT) |
		(1ULL << INFINIPATH_R_INTRAVAIL_SHIFT);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	/*
	 * now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.
	 */
	dd->ipath_flags |= IPATH_INITTED;

	/*
	 * init our shadow copies of head from tail values, and write
	 * head values to match.
	 */
	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
	(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
	dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);

	/* Initialize so we interrupt on next packet received */
	(void)ipath_write_ureg(dd, ur_rcvhdrhead,
			       dd->ipath_rhdrhead_intr_off |
			       dd->ipath_port0head, 0);

	/*
	 * by now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->ipath_pioavregs; i++) {
		/* NOTE(review): this __le64 val shadows the u32 val above */
		__le64 val;

		/*
		 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
		 */
		if (i > 3) {
			if (i & 1)
				val = dd->ipath_pioavailregs_dma[i - 1];
			else
				val = dd->ipath_pioavailregs_dma[i + 1];
		} else
			val = dd->ipath_pioavailregs_dma[i];
		dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
	}
	/* can get counters, stats, etc. */
	dd->ipath_flags |= IPATH_PRESENT;
}
/*
 * init_housekeeping - reset shadow state, validate fundamental chip
 * registers, and dispatch to first-time or post-reset init.
 * @dd: the infinipath device
 * @pdp: output for port 0 portdata (filled by init_chip_first/reset)
 * @reinit: nonzero for re-initialization after a reset
 *
 * Returns 0 on success; -ENODEV if the revision/base registers read
 * back as all-ones (chip unreachable); -ENOSYS on a software-version
 * mismatch; or an error from init_chip_first()/init_chip_reset().
 */
static int init_housekeeping(struct ipath_devdata *dd,
			     struct ipath_portdata **pdp, int reinit)
{
	char boardn[32];
	int ret = 0;

	/*
	 * have to clear shadow copies of registers at init that are
	 * not otherwise set here, or all kinds of bizarre things
	 * happen with driver on chip reset
	 */
	dd->ipath_rcvhdrsize = 0;

	/*
	 * Don't clear ipath_flags as 8bit mode was set before
	 * entering this func. However, we do set the linkstate to
	 * unknown, so we can watch for a transition.
	 */
	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
			     IPATH_LINKDOWN | IPATH_LINKINIT);

	ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
	dd->ipath_revision =
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);

	/*
	 * set up fundamental info we need to use the chip; we assume
	 * if the revision reg and these regs are OK, we don't need to
	 * special case the rest
	 */
	dd->ipath_sregbase =
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
	dd->ipath_cregbase =
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
	dd->ipath_uregbase =
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
	ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
		   "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
		   dd->ipath_uregbase, dd->ipath_cregbase);

	/* all-ones reads mean the chip is absent or not responding */
	if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
	    || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
	    || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
	    || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
		ipath_dev_err(dd, "Register read failures from chip, "
			      "giving up initialization\n");
		ret = -ENODEV;
		goto done;
	}

	/* clear the initial reset flag, in case first driver load */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
			 INFINIPATH_E_RESET);

	if (reinit)
		ret = init_chip_reset(dd, pdp);
	else
		ret = init_chip_first(dd, pdp);

	if (ret)
		goto done;

	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
		   "%u egrtids\n", (unsigned long long) dd->ipath_revision,
		   dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
		   dd->ipath_rcvegrcnt);

	/* refuse to run against a chip with a different software ABI */
	if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
	     INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
		ipath_dev_err(dd, "Driver only handles version %d, "
			      "chip swversion is %d (%llx), failng\n",
			      IPATH_CHIP_SWVERSION,
			      (int)(dd->ipath_revision >>
				    INFINIPATH_R_SOFTWARE_SHIFT) &
			      INFINIPATH_R_SOFTWARE_MASK,
			      (unsigned long long) dd->ipath_revision);
		ret = -ENOSYS;
		goto done;
	}

	/* unpack the revision register fields for later reporting */
	dd->ipath_majrev = (u8) ((dd->ipath_revision >>
				  INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
				 INFINIPATH_R_CHIPREVMAJOR_MASK);
	dd->ipath_minrev = (u8) ((dd->ipath_revision >>
				  INFINIPATH_R_CHIPREVMINOR_SHIFT) &
				 INFINIPATH_R_CHIPREVMINOR_MASK);
	dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
				    INFINIPATH_R_BOARDID_SHIFT) &
				   INFINIPATH_R_BOARDID_MASK);

	/* return value of the boardname hook is propagated to the caller */
	ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);

	snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
		 "Driver %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
		 "SW Compat %u\n",
		 IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
		 (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
		 INFINIPATH_R_ARCH_MASK,
		 dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
		 (unsigned)(dd->ipath_revision >>
			    INFINIPATH_R_SOFTWARE_SHIFT) &
		 INFINIPATH_R_SOFTWARE_MASK);

	ipath_dbg("%s", dd->ipath_boardversion);

done:
	return ret;
}
  560. /**
  561. * ipath_init_chip - do the actual initialization sequence on the chip
  562. * @dd: the infinipath device
  563. * @reinit: reinitializing, so don't allocate new memory
  564. *
  565. * Do the actual initialization sequence on the chip. This is done
  566. * both from the init routine called from the PCI infrastructure, and
  567. * when we reset the chip, or detect that it was reset internally,
  568. * or it's administratively re-enabled.
  569. *
  570. * Memory allocation here and in called routines is only done in
  571. * the first case (reinit == 0). We have to be careful, because even
  572. * without memory allocation, we need to re-write all the chip registers
  573. * TIDs, etc. after the reset or enable has completed.
  574. */
  575. int ipath_init_chip(struct ipath_devdata *dd, int reinit)
  576. {
  577. int ret = 0, i;
  578. u32 val32, kpiobufs;
  579. u64 val, atmp;
  580. struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
  581. ret = init_housekeeping(dd, &pd, reinit);
  582. if (ret)
  583. goto done;
  584. /*
  585. * we ignore most issues after reporting them, but have to specially
  586. * handle hardware-disabled chips.
  587. */
  588. if (ret == 2) {
  589. /* unique error, known to ipath_init_one */
  590. ret = -EPERM;
  591. goto done;
  592. }
  593. /*
  594. * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
  595. * but then it no longer nicely fits power of two, and since
  596. * we now use routines that backend onto __get_free_pages, the
  597. * rest would be wasted.
  598. */
  599. dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
  600. ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
  601. dd->ipath_rcvhdrcnt);
  602. /*
  603. * Set up the shadow copies of the piobufavail registers,
  604. * which we compare against the chip registers for now, and
  605. * the in memory DMA'ed copies of the registers. This has to
  606. * be done early, before we calculate lastport, etc.
  607. */
  608. val = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
  609. /*
  610. * calc number of pioavail registers, and save it; we have 2
  611. * bits per buffer.
  612. */
  613. dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
  614. / (sizeof(u64) * BITS_PER_BYTE / 2);
  615. if (!ipath_kpiobufs) /* have to have at least 1, for SMA */
  616. kpiobufs = ipath_kpiobufs = 1;
  617. else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) <
  618. (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) {
  619. dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) "
  620. "for %u ports to have %u each!\n",
  621. dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
  622. dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT);
  623. kpiobufs = 1; /* reserve just the minimum for SMA/ether */
  624. } else
  625. kpiobufs = ipath_kpiobufs;
  626. if (kpiobufs >
  627. (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
  628. (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) {
  629. i = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
  630. (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT);
  631. if (i < 0)
  632. i = 0;
  633. dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs for "
  634. "kernel leaves too few for %d user ports "
  635. "(%d each); using %u\n", kpiobufs,
  636. dd->ipath_cfgports - 1,
  637. IPATH_MIN_USER_PORT_BUFCNT, i);
  638. /*
  639. * shouldn't change ipath_kpiobufs, because could be
  640. * different for different devices...
  641. */
  642. kpiobufs = i;
  643. }
  644. dd->ipath_lastport_piobuf =
  645. dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - kpiobufs;
  646. dd->ipath_pbufsport = dd->ipath_cfgports > 1
  647. ? dd->ipath_lastport_piobuf / (dd->ipath_cfgports - 1)
  648. : 0;
  649. val32 = dd->ipath_lastport_piobuf -
  650. (dd->ipath_pbufsport * (dd->ipath_cfgports - 1));
  651. if (val32 > 0) {
  652. ipath_dbg("allocating %u pbufs/port leaves %u unused, "
  653. "add to kernel\n", dd->ipath_pbufsport, val32);
  654. dd->ipath_lastport_piobuf -= val32;
  655. ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
  656. dd->ipath_pbufsport, val32);
  657. }
  658. dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
  659. ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
  660. "each for %u user ports\n", kpiobufs,
  661. dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
  662. dd->ipath_pbufsport, dd->ipath_cfgports - 1);
  663. dd->ipath_f_early_init(dd);
  664. /* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
  665. * done after early_init */
  666. dd->ipath_hdrqlast =
  667. dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
  668. ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
  669. dd->ipath_rcvhdrentsize);
  670. ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
  671. dd->ipath_rcvhdrsize);
  672. if (!reinit) {
  673. ret = init_pioavailregs(dd);
  674. init_shadow_tids(dd);
  675. if (ret)
  676. goto done;
  677. }
  678. (void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
  679. dd->ipath_pioavailregs_phys);
  680. /*
  681. * this is to detect s/w errors, which the h/w works around by
  682. * ignoring the low 6 bits of address, if it wasn't aligned.
  683. */
  684. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
  685. if (val != dd->ipath_pioavailregs_phys) {
  686. ipath_dev_err(dd, "Catastrophic software error, "
  687. "SendPIOAvailAddr written as %lx, "
  688. "read back as %llx\n",
  689. (unsigned long) dd->ipath_pioavailregs_phys,
  690. (unsigned long long) val);
  691. ret = -EINVAL;
  692. goto done;
  693. }
  694. val = ipath_port0_rcvhdrtail_dma + dd->ipath_unit * 64;
  695. /* verify that the alignment requirement was met */
  696. ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
  697. 0, val);
  698. atmp = ipath_read_kreg64_port(
  699. dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 0);
  700. if (val != atmp) {
  701. ipath_dev_err(dd, "Catastrophic software error, "
  702. "RcvHdrTailAddr0 written as %llx, "
  703. "read back as %llx from %x\n",
  704. (unsigned long long) val,
  705. (unsigned long long) atmp,
  706. dd->ipath_kregs->kr_rcvhdrtailaddr);
  707. ret = -EINVAL;
  708. goto done;
  709. }
  710. ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
  711. /*
  712. * make sure we are not in freeze, and PIO send enabled, so
  713. * writes to pbc happen
  714. */
  715. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
  716. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
  717. ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
  718. ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
  719. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
  720. INFINIPATH_S_PIOENABLE);
  721. /*
  722. * before error clears, since we expect serdes pll errors during
  723. * this, the first time after reset
  724. */
  725. if (bringup_link(dd)) {
  726. dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
  727. ret = -ENETDOWN;
  728. goto done;
  729. }
  730. /*
  731. * clear any "expected" hwerrs from reset and/or initialization
  732. * clear any that aren't enabled (at least this once), and then
  733. * set the enable mask
  734. */
  735. dd->ipath_f_init_hwerrors(dd);
  736. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
  737. ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
  738. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
  739. dd->ipath_hwerrmask);
  740. dd->ipath_maskederrs = dd->ipath_ignorederrs;
  741. /* clear all */
  742. ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
  743. /* enable errors that are masked, at least this first time. */
  744. ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
  745. ~dd->ipath_maskederrs);
  746. /* clear any interrups up to this point (ints still not enabled) */
  747. ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
  748. ipath_stats.sps_lid[dd->ipath_unit] = dd->ipath_lid;
  749. /*
  750. * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
  751. * re-init, the simplest way to handle this is to free
  752. * existing, and re-allocate.
  753. */
  754. if (reinit)
  755. ipath_free_pddata(dd, 0, 0);
  756. dd->ipath_f_tidtemplate(dd);
  757. ret = ipath_create_rcvhdrq(dd, pd);
  758. if (!ret)
  759. ret = create_port0_egr(dd);
  760. if (ret)
  761. ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
  762. "rcvhdrq and/or egr bufs\n");
  763. else
  764. enable_chip(dd, pd, reinit);
  765. /*
  766. * cause retrigger of pending interrupts ignored during init,
  767. * even if we had errors
  768. */
  769. ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
  770. if(!dd->ipath_stats_timer_active) {
  771. /*
  772. * first init, or after an admin disable/enable
  773. * set up stats retrieval timer, even if we had errors
  774. * in last portion of setup
  775. */
  776. init_timer(&dd->ipath_stats_timer);
  777. dd->ipath_stats_timer.function = ipath_get_faststats;
  778. dd->ipath_stats_timer.data = (unsigned long) dd;
  779. /* every 5 seconds; */
  780. dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
  781. /* takes ~16 seconds to overflow at full IB 4x bandwdith */
  782. add_timer(&dd->ipath_stats_timer);
  783. dd->ipath_stats_timer_active = 1;
  784. }
  785. done:
  786. if (!ret) {
  787. ipath_get_guid(dd);
  788. *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
  789. if (!dd->ipath_f_intrsetup(dd)) {
  790. /* now we can enable all interrupts from the chip */
  791. ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
  792. -1LL);
  793. /* force re-interrupt of any pending interrupts. */
  794. ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
  795. 0ULL);
  796. /* chip is usable; mark it as initialized */
  797. *dd->ipath_statusp |= IPATH_STATUS_INITTED;
  798. } else
  799. ipath_dev_err(dd, "No interrupts enabled, couldn't "
  800. "setup interrupt address\n");
  801. if (dd->ipath_cfgports > ipath_stats.sps_nports)
  802. /*
  803. * sps_nports is a global, so, we set it to
  804. * the highest number of ports of any of the
  805. * chips we find; we never decrement it, at
  806. * least for now. Since this might have changed
  807. * over disable/enable or prior to reset, always
  808. * do the check and potentially adjust.
  809. */
  810. ipath_stats.sps_nports = dd->ipath_cfgports;
  811. } else
  812. ipath_dbg("Failed (%d) to initialize chip\n", ret);
  813. /* if ret is non-zero, we probably should do some cleanup
  814. here... */
  815. return ret;
  816. }
  817. static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
  818. {
  819. struct ipath_devdata *dd;
  820. unsigned long flags;
  821. unsigned short val;
  822. int ret;
  823. ret = ipath_parse_ushort(str, &val);
  824. spin_lock_irqsave(&ipath_devs_lock, flags);
  825. if (ret < 0)
  826. goto bail;
  827. if (val == 0) {
  828. ret = -EINVAL;
  829. goto bail;
  830. }
  831. list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
  832. if (dd->ipath_kregbase)
  833. continue;
  834. if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
  835. (dd->ipath_cfgports *
  836. IPATH_MIN_USER_PORT_BUFCNT)))
  837. {
  838. ipath_dev_err(
  839. dd,
  840. "Allocating %d PIO bufs for kernel leaves "
  841. "too few for %d user ports (%d each)\n",
  842. val, dd->ipath_cfgports - 1,
  843. IPATH_MIN_USER_PORT_BUFCNT);
  844. ret = -EINVAL;
  845. goto bail;
  846. }
  847. dd->ipath_lastport_piobuf =
  848. dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
  849. }
  850. ret = 0;
  851. bail:
  852. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  853. return ret;
  854. }