ipath_layer.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519
  1. /*
  2. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. /*
  33. * These are the routines used by layered drivers, currently just the
  34. * layered ethernet driver and verbs layer.
  35. */
  36. #include <linux/io.h>
  37. #include <linux/pci.h>
  38. #include <asm/byteorder.h>
  39. #include "ipath_kernel.h"
  40. #include "ips_common.h"
  41. #include "ipath_layer.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

/* Non-zero while a verbs client is registered; guarded by ipath_layer_mutex. */
static int ipath_verbs_registered;

/* Opcode the layered driver wants delivered to its l_rcv hook. */
u16 ipath_layer_rcv_opcode;

/*
 * Callback pointers installed by ipath_layer_register() (layer_*) and
 * ipath_verbs_register() (verbs_*).  Installation and teardown are
 * serialized by ipath_layer_mutex.
 */
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);
  56. int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
  57. {
  58. int ret = -ENODEV;
  59. if (dd->ipath_layer.l_arg && layer_intr)
  60. ret = layer_intr(dd->ipath_layer.l_arg, arg);
  61. return ret;
  62. }
  63. int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
  64. {
  65. int ret;
  66. mutex_lock(&ipath_layer_mutex);
  67. ret = __ipath_layer_intr(dd, arg);
  68. mutex_unlock(&ipath_layer_mutex);
  69. return ret;
  70. }
  71. int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
  72. struct sk_buff *skb)
  73. {
  74. int ret = -ENODEV;
  75. if (dd->ipath_layer.l_arg && layer_rcv)
  76. ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
  77. return ret;
  78. }
  79. int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
  80. {
  81. int ret = -ENODEV;
  82. if (dd->ipath_layer.l_arg && layer_rcv_lid)
  83. ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
  84. return ret;
  85. }
  86. int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
  87. {
  88. int ret = -ENODEV;
  89. if (dd->verbs_layer.l_arg && verbs_piobufavail)
  90. ret = verbs_piobufavail(dd->verbs_layer.l_arg);
  91. return ret;
  92. }
  93. int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
  94. u32 tlen)
  95. {
  96. int ret = -ENODEV;
  97. if (dd->verbs_layer.l_arg && verbs_rcv) {
  98. verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
  99. ret = 0;
  100. }
  101. return ret;
  102. }
/*
 * ipath_layer_set_linkstate - request an IB link state transition.
 * @dd: the infinipath device
 * @newstate: one of the IPATH_IB_LINK* request codes
 *
 * LINKDOWN variants issue the INITCMD and return immediately; the
 * INIT/ARM/ACTIVE transitions issue a LINKCMD and then wait (up to 2
 * seconds) for the hardware to reach the expected state.  Returns 0 on
 * success (or when already in the requested state), -EINVAL for an
 * invalid request or precondition, or the ipath_wait_linkstate() result.
 */
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		/* Take the link down and restart POLL training. */
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		/* Take the link down into the SLEEP training state. */
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		/* Take the link down and disable it entirely. */
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		/* Already there: nothing to do. */
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		/* ARM is only reachable from INIT or ACTIVE. */
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		/* ACTIVE is only reachable from ARMED. */
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	/* Poll for up to 2000 ms for the requested state(s). */
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* MTU no longer constrains ibmaxlen; restore PIO limit. */
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		/* MTU (plus max header) now constrains ibmaxlen. */
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* Refresh the chip's TID template for the new size. */
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
  253. int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
  254. {
  255. ipath_stats.sps_lid[dd->ipath_unit] = arg;
  256. dd->ipath_lid = arg;
  257. dd->ipath_lmc = lmc;
  258. mutex_lock(&ipath_layer_mutex);
  259. if (dd->ipath_layer.l_arg && layer_intr)
  260. layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
  261. mutex_unlock(&ipath_layer_mutex);
  262. return 0;
  263. }
  264. EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
  265. int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
  266. {
  267. /* XXX - need to inform anyone who cares this just happened. */
  268. dd->ipath_guid = guid;
  269. return 0;
  270. }
  271. EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
  272. __be64 ipath_layer_get_guid(struct ipath_devdata *dd)
  273. {
  274. return dd->ipath_guid;
  275. }
  276. EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
  277. u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
  278. {
  279. return dd->ipath_nguid;
  280. }
  281. EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
  282. int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
  283. u32 * boardrev, u32 * majrev, u32 * minrev)
  284. {
  285. *vendor = dd->ipath_vendorid;
  286. *boardrev = dd->ipath_boardrev;
  287. *majrev = dd->ipath_majrev;
  288. *minrev = dd->ipath_minrev;
  289. return 0;
  290. }
  291. EXPORT_SYMBOL_GPL(ipath_layer_query_device);
  292. u32 ipath_layer_get_flags(struct ipath_devdata *dd)
  293. {
  294. return dd->ipath_flags;
  295. }
  296. EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
  297. struct device *ipath_layer_get_device(struct ipath_devdata *dd)
  298. {
  299. return &dd->pcidev->dev;
  300. }
  301. EXPORT_SYMBOL_GPL(ipath_layer_get_device);
  302. u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
  303. {
  304. return dd->ipath_deviceid;
  305. }
  306. EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
  307. u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
  308. {
  309. return dd->ipath_lastibcstat;
  310. }
  311. EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
  312. u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
  313. {
  314. return dd->ipath_ibmtu;
  315. }
  316. EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
  317. void ipath_layer_add(struct ipath_devdata *dd)
  318. {
  319. mutex_lock(&ipath_layer_mutex);
  320. if (layer_add_one)
  321. dd->ipath_layer.l_arg =
  322. layer_add_one(dd->ipath_unit, dd);
  323. if (verbs_add_one)
  324. dd->verbs_layer.l_arg =
  325. verbs_add_one(dd->ipath_unit, dd);
  326. mutex_unlock(&ipath_layer_mutex);
  327. }
  328. void ipath_layer_del(struct ipath_devdata *dd)
  329. {
  330. mutex_lock(&ipath_layer_mutex);
  331. if (dd->ipath_layer.l_arg && layer_remove_one) {
  332. layer_remove_one(dd->ipath_layer.l_arg);
  333. dd->ipath_layer.l_arg = NULL;
  334. }
  335. if (dd->verbs_layer.l_arg && verbs_remove_one) {
  336. verbs_remove_one(dd->verbs_layer.l_arg);
  337. dd->verbs_layer.l_arg = NULL;
  338. }
  339. mutex_unlock(&ipath_layer_mutex);
  340. }
/*
 * ipath_layer_register - install the layered (ethernet) driver's callbacks
 * and attach every already-initialized device to it.
 *
 * @l_add/@l_remove: per-device attach/detach callbacks
 * @l_intr: asynchronous event callback
 * @l_rcv/@l_rcv_lid: packet receive callbacks
 * @l_rcv_opcode: opcode whose packets are routed to @l_rcv
 *
 * Always returns 0.
 */
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		/* Advertise OIB SMA unless a real SMA is already running. */
		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		/*
		 * l_add() may sleep, so drop the spinlock around the call;
		 * ipath_layer_mutex still excludes (un)registration, and
		 * the _safe iteration tolerates list changes.
		 */
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_register);
/*
 * ipath_layer_unregister - detach the layered driver from every device
 * and clear its callback pointers.
 */
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			/*
			 * The remove callback may sleep; drop the spinlock
			 * around it (the mutex still serializes us).
			 */
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	/* Clear all layered-driver hooks under the mutex. */
	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}
EXPORT_SYMBOL_GPL(ipath_layer_unregister);
/*
 * __ipath_verbs_timer - 1-jiffy periodic tick for the verbs layer.
 * @arg: the struct ipath_devdata for this unit, cast to unsigned long
 *
 * Polls the receive queue when interrupts can be missed, runs the verbs
 * layer's timer callback, then re-arms itself.
 */
static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	/* Re-arm for the next jiffy. */
	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}
  411. /**
  412. * ipath_verbs_register - verbs layer registration
  413. * @l_piobufavail: callback for when PIO buffers become available
  414. * @l_rcv: callback for receiving a packet
  415. * @l_timer_cb: timer callback
  416. * @ipath_devdata: device data structure is put here
  417. */
  418. int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
  419. void (*l_remove)(void *arg),
  420. int (*l_piobufavail) (void *arg),
  421. void (*l_rcv) (void *arg, void *rhdr,
  422. void *data, u32 tlen),
  423. void (*l_timer_cb) (void *arg))
  424. {
  425. struct ipath_devdata *dd, *tmp;
  426. unsigned long flags;
  427. mutex_lock(&ipath_layer_mutex);
  428. verbs_add_one = l_add;
  429. verbs_remove_one = l_remove;
  430. verbs_piobufavail = l_piobufavail;
  431. verbs_rcv = l_rcv;
  432. verbs_timer_cb = l_timer_cb;
  433. spin_lock_irqsave(&ipath_devs_lock, flags);
  434. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  435. if (!(dd->ipath_flags & IPATH_INITTED))
  436. continue;
  437. if (dd->verbs_layer.l_arg)
  438. continue;
  439. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  440. dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
  441. spin_lock_irqsave(&ipath_devs_lock, flags);
  442. }
  443. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  444. mutex_unlock(&ipath_layer_mutex);
  445. ipath_verbs_registered = 1;
  446. return 0;
  447. }
  448. EXPORT_SYMBOL_GPL(ipath_verbs_register);
/*
 * ipath_verbs_unregister - detach the verbs layer from every device,
 * clear the OIB SMA status bit, and reset all verbs callback pointers.
 */
void ipath_verbs_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		/* OIB SMA service goes away with the verbs layer. */
		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

		if (dd->verbs_layer.l_arg && verbs_remove_one) {
			/*
			 * The remove callback may sleep; drop the spinlock
			 * around it (the mutex still serializes us).
			 */
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			verbs_remove_one(dd->verbs_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->verbs_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	/* Clear all verbs hooks and the registered flag under the mutex. */
	verbs_add_one = NULL;
	verbs_remove_one = NULL;
	verbs_piobufavail = NULL;
	verbs_rcv = NULL;
	verbs_timer_cb = NULL;
	ipath_verbs_registered = 0;

	mutex_unlock(&ipath_layer_mutex);
}
EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
/*
 * ipath_layer_open - layered driver "open" on one unit.
 * @dd: the infinipath device
 * @pktmax: on success, receives the maximum packet length (ibmaxlen)
 *
 * Sets up the receive header queue entry size, then replays any already
 * relevant events (link up, LID/MLID set) to the layered driver so it
 * catches up with state that changed before it attached.  Returns 0 on
 * success, -EINVAL when no layer is attached, or the
 * ipath_setrcvhdrsize() error.
 */
int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);

	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	/* Collect the events that have already happened. */
	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (ipath_stats.sps_lid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_LID;
	if (ipath_stats.sps_mlid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_open);
  505. u16 ipath_layer_get_lid(struct ipath_devdata *dd)
  506. {
  507. return dd->ipath_lid;
  508. }
  509. EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
  510. /**
  511. * ipath_layer_get_mac - get the MAC address
  512. * @dd: the infinipath device
  513. * @mac: the MAC is put here
  514. *
  515. * This is the EUID-64 OUI octets (top 3), then
  516. * skip the next 2 (which should both be zero or 0xff).
  517. * The returned MAC is in network order
  518. * mac points to at least 6 bytes of buffer
  519. * We assume that by the time the LID is set, that the GUID is as valid
  520. * as it's ever going to be, rather than adding yet another status bit.
  521. */
  522. int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
  523. {
  524. u8 *guid;
  525. guid = (u8 *) &dd->ipath_guid;
  526. mac[0] = guid[0];
  527. mac[1] = guid[1];
  528. mac[2] = guid[2];
  529. mac[3] = guid[5];
  530. mac[4] = guid[6];
  531. mac[5] = guid[7];
  532. if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
  533. ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
  534. "%x %x\n", guid[3], guid[4]);
  535. return 0;
  536. }
  537. EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
  538. u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
  539. {
  540. return dd->ipath_mlid;
  541. }
  542. EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
  543. u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
  544. {
  545. return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
  546. }
  547. EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
/*
 * update_sge - advance an SGE state past @length bytes just consumed.
 * @ss: the scatter/gather state to advance
 * @length: number of bytes consumed from the current SGE
 *
 * When the current SGE is fully consumed, step to the next entry in the
 * list; when only the current memory-region segment is exhausted, step
 * to the next segment (and, if needed, the next map page) of the MR.
 */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		/* This SGE is done; load the next one, if any remain. */
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		/* Segment exhausted: advance within the MR map. */
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
  567. #ifdef __LITTLE_ENDIAN
  568. static inline u32 get_upper_bits(u32 data, u32 shift)
  569. {
  570. return data >> shift;
  571. }
  572. static inline u32 set_upper_bits(u32 data, u32 shift)
  573. {
  574. return data << shift;
  575. }
  576. static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
  577. {
  578. data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
  579. data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
  580. return data;
  581. }
  582. #else
  583. static inline u32 get_upper_bits(u32 data, u32 shift)
  584. {
  585. return data << shift;
  586. }
  587. static inline u32 set_upper_bits(u32 data, u32 shift)
  588. {
  589. return data >> shift;
  590. }
  591. static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
  592. {
  593. data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
  594. data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
  595. return data;
  596. }
  597. #endif
/*
 * copy_io - copy SGE-described data into a chip PIO buffer.
 * @piobuf: destination PIO buffer (word-at-a-time MMIO)
 * @ss: scatter/gather state describing the source data
 * @length: number of payload bytes to copy
 *
 * The source may be byte-aligned, so aligned 32-bit output words are
 * assembled from possibly misaligned source bytes, with a partial word
 * carried between iterations in @data (byte count in @extra).  The
 * final word is never written inside the loop: it is held in @last and
 * written only after a write-combining flush, since the last (trigger)
 * word tells the chip the buffer is complete.
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;	/* carried bytes currently held in 'data' */
	u32 data = 0;	/* partially assembled output word */
	u32 last;	/* final (trigger) word, written after flush */

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		/* SGEs always have length > 0 by construction. */
		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			/* Read the containing aligned word; extract the
			 * usable bytes above the misalignment offset. */
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			/*
			 * Upper bytes are the valid part of the source
			 * word; only take what remains of it.
			 */
			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				/* Carry plus new bytes fill a word. */
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			/* Merge each source word with the carry, emitting
			 * one completed output word per input word. */
			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			/* Aligned, no carry: bulk-copy whole words. */
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;
			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
	update_sge(ss, length);
}
/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;

		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
			     piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		update_sge(ss, len);
		ret = 0;
		goto bail;
	}

	/* Slow path: misaligned or multi-segment source data. */
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_verbs_send);
  796. EXPORT_SYMBOL_GPL(ipath_verbs_send);
  797. int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
  798. u64 *rwords, u64 *spkts, u64 *rpkts,
  799. u64 *xmit_wait)
  800. {
  801. int ret;
  802. if (!(dd->ipath_flags & IPATH_INITTED)) {
  803. /* no hardware, freeze, etc. */
  804. ipath_dbg("unit %u not usable\n", dd->ipath_unit);
  805. ret = -EINVAL;
  806. goto bail;
  807. }
  808. *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
  809. *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
  810. *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
  811. *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
  812. *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
  813. ret = 0;
  814. bail:
  815. return ret;
  816. }
  817. EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
  818. /**
  819. * ipath_layer_get_counters - get various chip counters
  820. * @dd: the infinipath device
  821. * @cntrs: counters are placed here
  822. *
  823. * Return the counters needed by recv_pma_get_portcounters().
  824. */
  825. int ipath_layer_get_counters(struct ipath_devdata *dd,
  826. struct ipath_layer_counters *cntrs)
  827. {
  828. int ret;
  829. if (!(dd->ipath_flags & IPATH_INITTED)) {
  830. /* no hardware, freeze, etc. */
  831. ipath_dbg("unit %u not usable\n", dd->ipath_unit);
  832. ret = -EINVAL;
  833. goto bail;
  834. }
  835. cntrs->symbol_error_counter =
  836. ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
  837. cntrs->link_error_recovery_counter =
  838. ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
  839. cntrs->link_downed_counter =
  840. ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
  841. cntrs->port_rcv_errors =
  842. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
  843. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
  844. ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
  845. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
  846. ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
  847. ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
  848. ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
  849. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
  850. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
  851. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
  852. ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
  853. cntrs->port_rcv_remphys_errors =
  854. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
  855. cntrs->port_xmit_discards =
  856. ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
  857. cntrs->port_xmit_data =
  858. ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
  859. cntrs->port_rcv_data =
  860. ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
  861. cntrs->port_xmit_packets =
  862. ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
  863. cntrs->port_rcv_packets =
  864. ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
  865. ret = 0;
  866. bail:
  867. return ret;
  868. }
  869. EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
/**
 * ipath_layer_want_buffer - ask to be notified when a PIO buffer frees up
 * @dd: the infinipath device
 *
 * Sets IPATH_S_PIOINTBUFAVAIL in the send-control shadow, then writes the
 * shadow to the chip's send control register so the "PIO buffer available"
 * interrupt is enabled.  Always returns 0.
 * NOTE(review): same body as ipath_layer_set_piointbufavail_int().
 */
int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
/**
 * ipath_layer_send_hdr - send an ether_header-sized packet via a PIO buffer
 * @dd: the infinipath device
 * @hdr: the header to send (the whole struct is written to the wire)
 *
 * Returns 0 on success, -EINVAL if the port is not open or the LRH does
 * not announce a BTH, -ENETDOWN if the link is not up/configured, or
 * -EBUSY if no PIO buffer is available.  The PIO buffer is WC-mapped, so
 * the flushes below enforce PBC -> body -> trigger-word ordering.
 */
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;	/* dwords copied before the trigger word */
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		/* receive header size not yet configured: port not open */
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else
		if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		    dd->ipath_lid == 0) {
			/*
			 * lid check is for when sma hasn't yet configured
			 */
			ret = -ENETDOWN;
			ipath_cdbg(VERBOSE, "send while not ready, "
				   "mylid=%u, flags=0x%x\n",
				   dd->ipath_lid, dd->ipath_flags);
		}

	/* sanity check: first 16 bits of the LRH must indicate a BTH */
	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2); /* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();	/* PBC must be ordered before the header */
	piobuf += 2;		/* step past the PBC qword (2 dwords) */
	uhdr = (u32 *)hdr;
	count = plen-1; /* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();	/* body must be ordered before trigger */
	__raw_writel(uhdr[count], piobuf + count);	/* trigger word */
	ipath_flush_wc(); /* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++; /* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
/**
 * ipath_layer_set_piointbufavail_int - enable the PIO-buffer-available interrupt
 * @dd: the infinipath device
 *
 * Sets IPATH_S_PIOINTBUFAVAIL in the send-control shadow and writes the
 * shadow to the chip's send control register.  Always returns 0.
 * NOTE(review): same body as ipath_layer_want_buffer().
 */
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
/**
 * ipath_layer_enable_timer - start the verbs-layer timer (and GPIO interrupt)
 * @dd: the infinipath device
 *
 * Arms the per-device verbs timer to fire on the next jiffy; on boards
 * with per-packet GPIO receive interrupts, also enables that interrupt.
 * Always returns 0.
 */
int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		/*
		 * NOTE(review): chip-specific debug-port select value;
		 * its meaning is not visible here — confirm against the
		 * chip documentation before changing.
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	/* fire the timer almost immediately (next jiffy) */
	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
/**
 * ipath_layer_disable_timer - stop the verbs-layer timer
 * @dd: the infinipath device
 *
 * Undoes ipath_layer_enable_timer(): masks the GPIO bit 2 interrupt
 * (when in use) and synchronously removes the timer.  Always returns 0.
 */
int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	/* del_timer_sync also waits for a concurrently-running handler */
	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
  974. /**
  975. * ipath_layer_set_verbs_flags - set the verbs layer flags
  976. * @dd: the infinipath device
  977. * @flags: the flags to set
  978. */
  979. int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
  980. {
  981. struct ipath_devdata *ss;
  982. unsigned long lflags;
  983. spin_lock_irqsave(&ipath_devs_lock, lflags);
  984. list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
  985. if (!(ss->ipath_flags & IPATH_INITTED))
  986. continue;
  987. if ((flags & IPATH_VERBS_KERNEL_SMA) &&
  988. !(*ss->ipath_statusp & IPATH_STATUS_SMA))
  989. *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
  990. else
  991. *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
  992. }
  993. spin_unlock_irqrestore(&ipath_devs_lock, lflags);
  994. return 0;
  995. }
  996. EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
  997. /**
  998. * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
  999. * @dd: the infinipath device
  1000. */
  1001. unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
  1002. {
  1003. return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
  1004. }
  1005. EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
  1006. /**
  1007. * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
  1008. * @dd: the infinipath device
  1009. * @index: the PKEY index
  1010. */
  1011. unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
  1012. {
  1013. unsigned ret;
  1014. if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
  1015. ret = 0;
  1016. else
  1017. ret = dd->ipath_pd[0]->port_pkeys[index];
  1018. return ret;
  1019. }
  1020. EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
  1021. /**
  1022. * ipath_layer_get_pkeys - return the PKEY table for port 0
  1023. * @dd: the infinipath device
  1024. * @pkeys: the pkey table is placed here
  1025. */
  1026. int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
  1027. {
  1028. struct ipath_portdata *pd = dd->ipath_pd[0];
  1029. memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
  1030. return 0;
  1031. }
  1032. EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
  1033. /**
  1034. * rm_pkey - decrecment the reference count for the given PKEY
  1035. * @dd: the infinipath device
  1036. * @key: the PKEY index
  1037. *
  1038. * Return true if this was the last reference and the hardware table entry
  1039. * needs to be changed.
  1040. */
  1041. static int rm_pkey(struct ipath_devdata *dd, u16 key)
  1042. {
  1043. int i;
  1044. int ret;
  1045. for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
  1046. if (dd->ipath_pkeys[i] != key)
  1047. continue;
  1048. if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
  1049. dd->ipath_pkeys[i] = 0;
  1050. ret = 1;
  1051. goto bail;
  1052. }
  1053. break;
  1054. }
  1055. ret = 0;
  1056. bail:
  1057. return ret;
  1058. }
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 *
 * Uses a two-pass scheme: first scan for a matching entry or count the
 * empty slots, then (if needed) race to claim an empty slot with an
 * atomic refcount increment.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;	/* key with the top bit stripped */
	int any = 0;			/* count of empty slots seen */
	int ret;

	/* low 15 bits all ones: treated as "no change", no slot needed */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				/* already in the table; just took a ref */
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		/* table full, no matching entry: nothing we can do */
		ret = -EBUSY;
		goto bail;
	}
	/*
	 * Second pass: claim an empty slot.  Seeing the refcount go
	 * 0 -> 1 means we won any concurrent race for that slot.
	 */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	/* every apparently-empty slot was claimed concurrently */
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 *
 * Applies @pkeys to the port's PKEY table, maintaining reference counts
 * in the device-wide table via rm_pkey()/add_pkey(), and rewrites the
 * chip's partition key register when any of the first four hardware
 * slots changed.  Entries that cannot be added to the hardware table are
 * stored as 0.  Always returns 0.
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;	/* non-zero once a hardware slot changed */

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;	/* couldn't add: store empty */
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		/* pack hardware slots 0..3 into the 64-bit register image */
		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
  1166. /**
  1167. * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
  1168. * @dd: the infinipath device
  1169. *
  1170. * Returns zero if the default is POLL, 1 if the default is SLEEP.
  1171. */
  1172. int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
  1173. {
  1174. return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
  1175. }
  1176. EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
  1177. /**
  1178. * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
  1179. * @dd: the infinipath device
  1180. * @sleep: the new state
  1181. *
  1182. * Note that this will only take effect when the link state changes.
  1183. */
  1184. int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
  1185. int sleep)
  1186. {
  1187. if (sleep)
  1188. dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
  1189. else
  1190. dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
  1191. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1192. dd->ipath_ibcctrl);
  1193. return 0;
  1194. }
  1195. EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
  1196. int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
  1197. {
  1198. return (dd->ipath_ibcctrl >>
  1199. INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
  1200. INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
  1201. }
  1202. EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
  1203. /**
  1204. * ipath_layer_set_phyerrthreshold - set the physical error threshold
  1205. * @dd: the infinipath device
  1206. * @n: the new threshold
  1207. *
  1208. * Note that this will only take effect when the link state changes.
  1209. */
  1210. int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
  1211. {
  1212. unsigned v;
  1213. v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
  1214. INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
  1215. if (v != n) {
  1216. dd->ipath_ibcctrl &=
  1217. ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
  1218. INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
  1219. dd->ipath_ibcctrl |=
  1220. (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
  1221. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1222. dd->ipath_ibcctrl);
  1223. }
  1224. return 0;
  1225. }
  1226. EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
  1227. int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
  1228. {
  1229. return (dd->ipath_ibcctrl >>
  1230. INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
  1231. INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
  1232. }
  1233. EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
  1234. /**
  1235. * ipath_layer_set_overrunthreshold - set the overrun threshold
  1236. * @dd: the infinipath device
  1237. * @n: the new threshold
  1238. *
  1239. * Note that this will only take effect when the link state changes.
  1240. */
  1241. int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
  1242. {
  1243. unsigned v;
  1244. v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
  1245. INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
  1246. if (v != n) {
  1247. dd->ipath_ibcctrl &=
  1248. ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
  1249. INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
  1250. dd->ipath_ibcctrl |=
  1251. (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
  1252. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1253. dd->ipath_ibcctrl);
  1254. }
  1255. return 0;
  1256. }
  1257. EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
  1258. int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
  1259. size_t namelen)
  1260. {
  1261. return dd->ipath_f_get_boardname(dd, name, namelen);
  1262. }
  1263. EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
  1264. u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
  1265. {
  1266. return dd->ipath_rcvhdrentsize;
  1267. }
  1268. EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);