ipath_layer.c

/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_common.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

static int ipath_verbs_registered;

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);

int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}

int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_piobufavail)
		ret = verbs_piobufavail(dd->verbs_layer.l_arg);

	return ret;
}

int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
		      u32 tlen)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_rcv) {
		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
		ret = 0;
	}

	return ret;
}

int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
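
/*
 * Illustrative only (not part of the driver): a hypothetical SMA-style
 * caller could walk the link up with the helper above, e.g.
 *
 *	ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKINIT);
 *	if (!ret)
 *		ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
 *	if (!ret)
 *		ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 *
 * The LINKDOWN variants return immediately; the other requests block in
 * ipath_wait_linkstate() with the timeout value 2000 used above.
 */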

/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;

		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;
		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
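
/*
 * Sketch of a caller (illustrative, assuming the core verbs helper
 * ib_mtu_enum_to_int() is available to convert an IB MTU enum to bytes):
 *
 *	int mtu = ib_mtu_enum_to_int(IB_MTU_2048);	(yields 2048)
 *
 *	if (mtu > 0)
 *		ret = ipath_layer_set_mtu(dd, mtu);
 *
 * Only 256, 512, 1024, 2048 and 4096 are accepted; anything else fails
 * with -EINVAL per the check above.
 */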

int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_set_lid);

int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_guid);

__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
{
	return dd->ipath_majrev;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);

u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
{
	return dd->ipath_minrev;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);

u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
{
	return dd->ipath_pcirev;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_device);

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
{
	return dd->ipath_vendorid;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);

void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_remove(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg && verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}

int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_register);
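
/*
 * Sketch of how a layered (e.g. ethernet) driver might hook in; the
 * callback names and the opcode value are hypothetical and only illustrate
 * the expected signatures:
 *
 *	static void *my_add(int unit, struct ipath_devdata *dd);
 *	static void my_remove(void *arg);
 *	static int my_intr(void *arg, u32 what);
 *	static int my_rcv(void *arg, void *hdr, struct sk_buff *skb);
 *	static int my_rcv_lid(void *arg, void *hdr);
 *
 *	ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *			     MY_RCV_OPCODE, my_rcv_lid);
 *
 * Registration immediately invokes the add callback for every device that
 * is already INITTED, so the callbacks must be usable before this call.
 */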

void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}
EXPORT_SYMBOL_GPL(ipath_layer_unregister);

static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}

/**
 * ipath_verbs_register - verbs layer registration
 * @l_add: callback to create the verbs layer's per-device state
 * @l_remove: callback to tear down that per-device state
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 */
int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *arg),
			 int (*l_piobufavail) (void *arg),
			 void (*l_rcv) (void *arg, void *rhdr,
					void *data, u32 tlen),
			 void (*l_timer_cb) (void *arg))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	verbs_add_one = l_add;
	verbs_remove_one = l_remove;
	verbs_piobufavail = l_piobufavail;
	verbs_rcv = l_rcv;
	verbs_timer_cb = l_timer_cb;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->verbs_layer.l_arg)
			continue;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	ipath_verbs_registered = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_verbs_register);
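
/*
 * Verbs registration mirrors ipath_layer_register(): hypothetical callbacks
 * matching the signatures above are handed over once, and the add callback
 * runs right away for every already-initialized device, e.g.
 *
 *	ipath_verbs_register(my_verbs_add, my_verbs_remove,
 *			     my_piobufavail, my_verbs_rcv, my_timer_cb);
 */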

void ipath_verbs_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

		if (dd->verbs_layer.l_arg && verbs_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			verbs_remove_one(dd->verbs_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->verbs_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	verbs_add_one = NULL;
	verbs_remove_one = NULL;
	verbs_piobufavail = NULL;
	verbs_rcv = NULL;
	verbs_timer_cb = NULL;
	ipath_verbs_registered = 0;

	mutex_unlock(&ipath_layer_mutex);
}
EXPORT_SYMBOL_GPL(ipath_verbs_unregister);

int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (dd->ipath_lid)
		intval |= IPATH_LAYER_INT_LID;
	if (dd->ipath_mlid)
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_open);

u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUI-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order
 * mac points to at least 6 bytes of buffer
 * We assume that by the time the LID is set, that the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
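
/*
 * Worked example (hypothetical GUID, for illustration only): a GUID stored
 * as 00:11:75:ff:ff:01:02:03 yields the MAC 00:11:75:01:02:03; bytes 0-2
 * (the OUI) and bytes 5-7 are kept, and the 0xff/0x00 padding in bytes 3-4
 * is dropped, as the assignments above show.
 */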

u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);

static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
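
/*
 * Worked example of the little-endian clear_upper_bytes() above (values
 * are illustrative): with data = 0xAABBCCDD, n = 2, off = 1 the word is
 * shifted left by (4 - 2) * 8 = 16 bits to 0xCCDD0000, then right by
 * (4 - 2 - 1) * 8 = 8 bits, giving 0x00CCDD00.  In memory that keeps the
 * first two payload bytes (0xDD, 0xCC) and moves them to byte offset 1,
 * zeroing the rest, which is what copy_io() below relies on when it
 * staggers unaligned source bytes into aligned 32-bit stores.
 */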

static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;
			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
}

/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, addr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(addr[w - 1], piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_verbs_send);
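
/*
 * Illustrative arithmetic (numbers made up): with hdrwords = 10 and a
 * payload of len = 256 bytes, plen = 10 + ((256 + 3) >> 2) + 1 = 75
 * 32-bit words, i.e. plen << 2 = 300 bytes compared against
 * dd->ipath_ibmaxlen in the length check above; the extra word is the
 * qword padding of the PBC.
 */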

int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);

/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_layer_get_counters(struct ipath_devdata *dd,
			     struct ipath_layer_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */

	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_counters);

int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);

int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		   dd->ipath_lid == 0) {
		/*
		 * lid check is for when sma hasn't yet configured
		 */
		ret = -ENETDOWN;
		ipath_cdbg(VERBOSE, "send while not ready, "
			   "mylid=%u, flags=0x%x\n",
			   dd->ipath_lid, dd->ipath_flags);
	}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPATH_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPATH_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2);	/* actual length */

	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen + 1, piobuf);	/* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen - 1;	/* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);

int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);

int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);

/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
	struct ipath_devdata *ss;
	unsigned long lflags;

	spin_lock_irqsave(&ipath_devs_lock, lflags);

	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
		if (!(ss->ipath_flags & IPATH_INITTED))
			continue;
		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
		else
			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, lflags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);

/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);

/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);

/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
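
/*
 * Example of the register packing above (hypothetical table contents):
 * with ipath_pkeys[] = { 0xFFFF, 0x8001, 0, 0 } the value written to
 * kr_partitionkey is
 *
 *	0xFFFF | ((u64) 0x8001 << 16) = 0x000000008001FFFF
 *
 * i.e. entry 0 in bits 15:0, entry 1 in bits 31:16, and so on for the
 * four 16-bit slots.
 */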

/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);

/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
					 int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);

int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);

/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);

int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);

/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);

int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);

u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);