ipath_layer.c

  1. /*
  2. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. /*
  33. * These are the routines used by layered drivers, currently just the
  34. * layered ethernet driver and verbs layer.
  35. */
  36. #include <linux/io.h>
  37. #include <linux/pci.h>
  38. #include <asm/byteorder.h>
  39. #include "ipath_kernel.h"
  40. #include "ips_common.h"
  41. #include "ipath_layer.h"
  42. /* Acquire before ipath_devs_lock. */
  43. static DEFINE_MUTEX(ipath_layer_mutex);
  44. static int ipath_verbs_registered;
  45. u16 ipath_layer_rcv_opcode;
  46. static int (*layer_intr)(void *, u32);
  47. static int (*layer_rcv)(void *, void *, struct sk_buff *);
  48. static int (*layer_rcv_lid)(void *, void *);
  49. static int (*verbs_piobufavail)(void *);
  50. static void (*verbs_rcv)(void *, void *, void *, u32);
  51. static void *(*layer_add_one)(int, struct ipath_devdata *);
  52. static void (*layer_remove_one)(void *);
  53. static void *(*verbs_add_one)(int, struct ipath_devdata *);
  54. static void (*verbs_remove_one)(void *);
  55. static void (*verbs_timer_cb)(void *);
  56. int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
  57. {
  58. int ret = -ENODEV;
  59. if (dd->ipath_layer.l_arg && layer_intr)
  60. ret = layer_intr(dd->ipath_layer.l_arg, arg);
  61. return ret;
  62. }
  63. int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
  64. {
  65. int ret;
  66. mutex_lock(&ipath_layer_mutex);
  67. ret = __ipath_layer_intr(dd, arg);
  68. mutex_unlock(&ipath_layer_mutex);
  69. return ret;
  70. }
  71. int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
  72. struct sk_buff *skb)
  73. {
  74. int ret = -ENODEV;
  75. if (dd->ipath_layer.l_arg && layer_rcv)
  76. ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
  77. return ret;
  78. }
  79. int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
  80. {
  81. int ret = -ENODEV;
  82. if (dd->ipath_layer.l_arg && layer_rcv_lid)
  83. ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
  84. return ret;
  85. }
  86. int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
  87. {
  88. int ret = -ENODEV;
  89. if (dd->verbs_layer.l_arg && verbs_piobufavail)
  90. ret = verbs_piobufavail(dd->verbs_layer.l_arg);
  91. return ret;
  92. }
  93. int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
  94. u32 tlen)
  95. {
  96. int ret = -ENODEV;
  97. if (dd->verbs_layer.l_arg && verbs_rcv) {
  98. verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
  99. ret = 0;
  100. }
  101. return ret;
  102. }
  103. int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
  104. {
  105. u32 lstate;
  106. int ret;
  107. switch (newstate) {
  108. case IPATH_IB_LINKDOWN:
  109. ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
  110. INFINIPATH_IBCC_LINKINITCMD_SHIFT);
  111. /* don't wait */
  112. ret = 0;
  113. goto bail;
  114. case IPATH_IB_LINKDOWN_SLEEP:
  115. ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
  116. INFINIPATH_IBCC_LINKINITCMD_SHIFT);
  117. /* don't wait */
  118. ret = 0;
  119. goto bail;
  120. case IPATH_IB_LINKDOWN_DISABLE:
  121. ipath_set_ib_lstate(dd,
  122. INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
  123. INFINIPATH_IBCC_LINKINITCMD_SHIFT);
  124. /* don't wait */
  125. ret = 0;
  126. goto bail;
  127. case IPATH_IB_LINKINIT:
  128. if (dd->ipath_flags & IPATH_LINKINIT) {
  129. ret = 0;
  130. goto bail;
  131. }
  132. ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
  133. INFINIPATH_IBCC_LINKCMD_SHIFT);
  134. lstate = IPATH_LINKINIT;
  135. break;
  136. case IPATH_IB_LINKARM:
  137. if (dd->ipath_flags & IPATH_LINKARMED) {
  138. ret = 0;
  139. goto bail;
  140. }
  141. if (!(dd->ipath_flags &
  142. (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
  143. ret = -EINVAL;
  144. goto bail;
  145. }
  146. ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
  147. INFINIPATH_IBCC_LINKCMD_SHIFT);
  148. /*
  149. * Since the port can transition to ACTIVE by receiving
  150. * a non VL 15 packet, wait for either state.
  151. */
  152. lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
  153. break;
  154. case IPATH_IB_LINKACTIVE:
  155. if (dd->ipath_flags & IPATH_LINKACTIVE) {
  156. ret = 0;
  157. goto bail;
  158. }
  159. if (!(dd->ipath_flags & IPATH_LINKARMED)) {
  160. ret = -EINVAL;
  161. goto bail;
  162. }
  163. ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
  164. INFINIPATH_IBCC_LINKCMD_SHIFT);
  165. lstate = IPATH_LINKACTIVE;
  166. break;
  167. default:
  168. ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
  169. ret = -EINVAL;
  170. goto bail;
  171. }
  172. ret = ipath_wait_linkstate(dd, lstate, 2000);
  173. bail:
  174. return ret;
  175. }
  176. EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
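/*
 * Example (editor's sketch, not part of the driver; the helper name is
 * hypothetical): an upper layer typically walks the link up through
 * INIT -> ARMED -> ACTIVE with this call.  The LINKDOWN variants return
 * immediately; the others wait up to 2 seconds for the target state.
 */
static int example_bring_link_active(struct ipath_devdata *dd)
{
	int ret;

	ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKINIT);
	if (ret)
		return ret;
	ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
	if (ret)
		return ret;
	/* ACTIVE is only accepted from ARMED; anything else is -EINVAL */
	return ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
}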
  177. /**
  178. * ipath_layer_set_mtu - set the MTU
  179. * @dd: the infinipath device
  180. * @arg: the new MTU
  181. *
  182. * We can handle "any" incoming size; the issue here is whether we
  183. * need to restrict our outgoing size. For now, we don't do any
  184. * sanity checking on this, and we don't deal with what happens to
  185. * programs that are already running when the size changes.
  186. * NOTE: changing the MTU will usually cause the IBC to go back to
  187. * link initialize (IPATH_IBSTATE_INIT) state...
  188. */
  189. int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
  190. {
  191. u32 piosize;
  192. int changed = 0;
  193. int ret;
  194. /*
  195. * mtu is IB data payload max. It's the largest power of 2 less
  196. * than piosize (or even larger, since it only really controls the
  197. * largest we can receive; we can send the max of the mtu and
  198. * piosize). We check that it's one of the valid IB sizes.
  199. */
  200. if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
  201. arg != 4096) {
  202. ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
  203. ret = -EINVAL;
  204. goto bail;
  205. }
  206. if (dd->ipath_ibmtu == arg) {
  207. ret = 0; /* same as current */
  208. goto bail;
  209. }
  210. piosize = dd->ipath_ibmaxlen;
  211. dd->ipath_ibmtu = arg;
  212. if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
  213. /* Only if it's not the initial value (or reset to it) */
  214. if (piosize != dd->ipath_init_ibmaxlen) {
  215. dd->ipath_ibmaxlen = piosize;
  216. changed = 1;
  217. }
  218. } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
  219. piosize = arg + IPATH_PIO_MAXIBHDR;
  220. ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
  221. "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
  222. arg);
  223. dd->ipath_ibmaxlen = piosize;
  224. changed = 1;
  225. }
  226. if (changed) {
  227. /*
  228. * set the IBC maxpktlength to the size of our pio
  229. * buffers in words
  230. */
  231. u64 ibc = dd->ipath_ibcctrl;
  232. ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
  233. INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
  234. piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
  235. dd->ipath_ibmaxlen = piosize;
  236. piosize /= sizeof(u32); /* in words */
  237. /*
  238. * add one word for the ICRC, which we only send in diag test pkt
  239. * mode, so we don't need to worry about it for the MTU
  240. */
  241. piosize += 1;
  242. ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
  243. dd->ipath_ibcctrl = ibc;
  244. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  245. dd->ipath_ibcctrl);
  246. dd->ipath_f_tidtemplate(dd);
  247. }
  248. ret = 0;
  249. bail:
  250. return ret;
  251. }
  252. EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
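/*
 * Worked example (editor's sketch): the IBC MAXPKTLEN field programmed
 * above is expressed in 32-bit words.  Starting from the buffer size in
 * bytes, drop the two-word PBC, convert to words, then add one word for
 * the ICRC.  E.g. a hypothetical 2176-byte buffer gives
 * (2176 - 8) / 4 + 1 = 543 words.
 */
static u32 example_ibc_maxpktlen_words(u32 bufsize_bytes)
{
	u32 bytes = bufsize_bytes - 2 * sizeof(u32);	/* ignore the PBC */

	return bytes / sizeof(u32) + 1;			/* words, plus ICRC */
}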
  253. int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
  254. {
  255. ipath_stats.sps_lid[dd->ipath_unit] = arg;
  256. dd->ipath_lid = arg;
  257. dd->ipath_lmc = lmc;
  258. mutex_lock(&ipath_layer_mutex);
  259. if (dd->ipath_layer.l_arg && layer_intr)
  260. layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
  261. mutex_unlock(&ipath_layer_mutex);
  262. return 0;
  263. }
  264. EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
  265. int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
  266. {
  267. /* XXX - need to inform anyone who cares this just happened. */
  268. dd->ipath_guid = guid;
  269. return 0;
  270. }
  271. EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
  272. __be64 ipath_layer_get_guid(struct ipath_devdata *dd)
  273. {
  274. return dd->ipath_guid;
  275. }
  276. EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
  277. u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
  278. {
  279. return dd->ipath_nguid;
  280. }
  281. EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
  282. int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
  283. u32 * boardrev, u32 * majrev, u32 * minrev)
  284. {
  285. *vendor = dd->ipath_vendorid;
  286. *boardrev = dd->ipath_boardrev;
  287. *majrev = dd->ipath_majrev;
  288. *minrev = dd->ipath_minrev;
  289. return 0;
  290. }
  291. EXPORT_SYMBOL_GPL(ipath_layer_query_device);
  292. u32 ipath_layer_get_flags(struct ipath_devdata *dd)
  293. {
  294. return dd->ipath_flags;
  295. }
  296. EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
  297. struct device *ipath_layer_get_device(struct ipath_devdata *dd)
  298. {
  299. return &dd->pcidev->dev;
  300. }
  301. EXPORT_SYMBOL_GPL(ipath_layer_get_device);
  302. u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
  303. {
  304. return dd->ipath_deviceid;
  305. }
  306. EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
  307. u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
  308. {
  309. return dd->ipath_lastibcstat;
  310. }
  311. EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
  312. u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
  313. {
  314. return dd->ipath_ibmtu;
  315. }
  316. EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
  317. void ipath_layer_add(struct ipath_devdata *dd)
  318. {
  319. mutex_lock(&ipath_layer_mutex);
  320. if (layer_add_one)
  321. dd->ipath_layer.l_arg =
  322. layer_add_one(dd->ipath_unit, dd);
  323. if (verbs_add_one)
  324. dd->verbs_layer.l_arg =
  325. verbs_add_one(dd->ipath_unit, dd);
  326. mutex_unlock(&ipath_layer_mutex);
  327. }
  328. void ipath_layer_del(struct ipath_devdata *dd)
  329. {
  330. mutex_lock(&ipath_layer_mutex);
  331. if (dd->ipath_layer.l_arg && layer_remove_one) {
  332. layer_remove_one(dd->ipath_layer.l_arg);
  333. dd->ipath_layer.l_arg = NULL;
  334. }
  335. if (dd->verbs_layer.l_arg && verbs_remove_one) {
  336. verbs_remove_one(dd->verbs_layer.l_arg);
  337. dd->verbs_layer.l_arg = NULL;
  338. }
  339. mutex_unlock(&ipath_layer_mutex);
  340. }
  341. int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
  342. void (*l_remove)(void *),
  343. int (*l_intr)(void *, u32),
  344. int (*l_rcv)(void *, void *, struct sk_buff *),
  345. u16 l_rcv_opcode,
  346. int (*l_rcv_lid)(void *, void *))
  347. {
  348. struct ipath_devdata *dd, *tmp;
  349. unsigned long flags;
  350. mutex_lock(&ipath_layer_mutex);
  351. layer_add_one = l_add;
  352. layer_remove_one = l_remove;
  353. layer_intr = l_intr;
  354. layer_rcv = l_rcv;
  355. layer_rcv_lid = l_rcv_lid;
  356. ipath_layer_rcv_opcode = l_rcv_opcode;
  357. spin_lock_irqsave(&ipath_devs_lock, flags);
  358. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  359. if (!(dd->ipath_flags & IPATH_INITTED))
  360. continue;
  361. if (dd->ipath_layer.l_arg)
  362. continue;
  363. if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
  364. *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
  365. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  366. dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
  367. spin_lock_irqsave(&ipath_devs_lock, flags);
  368. }
  369. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  370. mutex_unlock(&ipath_layer_mutex);
  371. return 0;
  372. }
  373. EXPORT_SYMBOL_GPL(ipath_layer_register);
  374. void ipath_layer_unregister(void)
  375. {
  376. struct ipath_devdata *dd, *tmp;
  377. unsigned long flags;
  378. mutex_lock(&ipath_layer_mutex);
  379. spin_lock_irqsave(&ipath_devs_lock, flags);
  380. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  381. if (dd->ipath_layer.l_arg && layer_remove_one) {
  382. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  383. layer_remove_one(dd->ipath_layer.l_arg);
  384. spin_lock_irqsave(&ipath_devs_lock, flags);
  385. dd->ipath_layer.l_arg = NULL;
  386. }
  387. }
  388. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  389. layer_add_one = NULL;
  390. layer_remove_one = NULL;
  391. layer_intr = NULL;
  392. layer_rcv = NULL;
  393. layer_rcv_lid = NULL;
  394. mutex_unlock(&ipath_layer_mutex);
  395. }
  396. EXPORT_SYMBOL_GPL(ipath_layer_unregister);
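/*
 * Example (editor's sketch; all example_* names are hypothetical): a
 * layered driver such as the ethernet emulation registers its callbacks
 * once, and l_add is then invoked for every device that is already
 * INITTED as well as for devices added later via ipath_layer_add().
 */
static void *example_eth_add(int unit, struct ipath_devdata *dd)
{
	/* allocate and return per-device state; it is passed back as 'arg' */
	return dd;
}

static void example_eth_remove(void *arg)
{
}

static int example_eth_intr(void *arg, u32 what)
{
	return 0;
}

static int example_eth_rcv(void *arg, void *hdr, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}

static int example_eth_rcv_lid(void *arg, void *hdr)
{
	return 0;
}

/*
 * Typical call at module init (the opcode is whatever the layered
 * protocol uses; EXAMPLE_ETH_OPCODE is a hypothetical placeholder):
 *
 *	ipath_layer_register(example_eth_add, example_eth_remove,
 *			     example_eth_intr, example_eth_rcv,
 *			     EXAMPLE_ETH_OPCODE, example_eth_rcv_lid);
 */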
  397. static void __ipath_verbs_timer(unsigned long arg)
  398. {
  399. struct ipath_devdata *dd = (struct ipath_devdata *) arg;
  400. /*
  401. * If port 0 receive packet interrupts are not available, or
  402. * can be missed, poll the receive queue
  403. */
  404. if (dd->ipath_flags & IPATH_POLL_RX_INTR)
  405. ipath_kreceive(dd);
  406. /* Handle verbs layer timeouts. */
  407. if (dd->verbs_layer.l_arg && verbs_timer_cb)
  408. verbs_timer_cb(dd->verbs_layer.l_arg);
  409. mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
  410. }
  411. /**
  412. * ipath_verbs_register - verbs layer registration
  413. * @l_piobufavail: callback for when PIO buffers become available
  414. * @l_rcv: callback for receiving a packet
  415. * @l_timer_cb: timer callback
  416. * @l_add: per-device attach callback; its return value is saved as the layer's device data
  417. */
  418. int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
  419. void (*l_remove)(void *arg),
  420. int (*l_piobufavail) (void *arg),
  421. void (*l_rcv) (void *arg, void *rhdr,
  422. void *data, u32 tlen),
  423. void (*l_timer_cb) (void *arg))
  424. {
  425. struct ipath_devdata *dd, *tmp;
  426. unsigned long flags;
  427. mutex_lock(&ipath_layer_mutex);
  428. verbs_add_one = l_add;
  429. verbs_remove_one = l_remove;
  430. verbs_piobufavail = l_piobufavail;
  431. verbs_rcv = l_rcv;
  432. verbs_timer_cb = l_timer_cb;
  433. spin_lock_irqsave(&ipath_devs_lock, flags);
  434. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  435. if (!(dd->ipath_flags & IPATH_INITTED))
  436. continue;
  437. if (dd->verbs_layer.l_arg)
  438. continue;
  439. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  440. dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
  441. spin_lock_irqsave(&ipath_devs_lock, flags);
  442. }
  443. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  444. mutex_unlock(&ipath_layer_mutex);
  445. ipath_verbs_registered = 1;
  446. return 0;
  447. }
  448. EXPORT_SYMBOL_GPL(ipath_verbs_register);
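/*
 * Example call (editor's sketch, hypothetical names): the verbs module
 * calls this once at init and pairs it with ipath_verbs_unregister() at
 * exit.  l_timer_cb is driven from __ipath_verbs_timer() roughly once a
 * jiffy after ipath_layer_enable_timer() has been called for the device:
 *
 *	ipath_verbs_register(my_verbs_add, my_verbs_remove,
 *			     my_piobufavail, my_verbs_rcv, my_timer_cb);
 */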
  449. void ipath_verbs_unregister(void)
  450. {
  451. struct ipath_devdata *dd, *tmp;
  452. unsigned long flags;
  453. mutex_lock(&ipath_layer_mutex);
  454. spin_lock_irqsave(&ipath_devs_lock, flags);
  455. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  456. *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
  457. if (dd->verbs_layer.l_arg && verbs_remove_one) {
  458. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  459. verbs_remove_one(dd->verbs_layer.l_arg);
  460. spin_lock_irqsave(&ipath_devs_lock, flags);
  461. dd->verbs_layer.l_arg = NULL;
  462. }
  463. }
  464. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  465. verbs_add_one = NULL;
  466. verbs_remove_one = NULL;
  467. verbs_piobufavail = NULL;
  468. verbs_rcv = NULL;
  469. verbs_timer_cb = NULL;
  470. ipath_verbs_registered = 0;
  471. mutex_unlock(&ipath_layer_mutex);
  472. }
  473. EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
  474. int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
  475. {
  476. int ret;
  477. u32 intval = 0;
  478. mutex_lock(&ipath_layer_mutex);
  479. if (!dd->ipath_layer.l_arg) {
  480. ret = -EINVAL;
  481. goto bail;
  482. }
  483. ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
  484. if (ret < 0)
  485. goto bail;
  486. *pktmax = dd->ipath_ibmaxlen;
  487. if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
  488. intval |= IPATH_LAYER_INT_IF_UP;
  489. if (ipath_stats.sps_lid[dd->ipath_unit])
  490. intval |= IPATH_LAYER_INT_LID;
  491. if (ipath_stats.sps_mlid[dd->ipath_unit])
  492. intval |= IPATH_LAYER_INT_BCAST;
  493. /*
  494. * do this on open, in case the low level is already up and
  495. * just the layered driver was reloaded, etc.
  496. */
  497. if (intval)
  498. layer_intr(dd->ipath_layer.l_arg, intval);
  499. ret = 0;
  500. bail:
  501. mutex_unlock(&ipath_layer_mutex);
  502. return ret;
  503. }
  504. EXPORT_SYMBOL_GPL(ipath_layer_open);
  505. u16 ipath_layer_get_lid(struct ipath_devdata *dd)
  506. {
  507. return dd->ipath_lid;
  508. }
  509. EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
  510. /**
  511. * ipath_layer_get_mac - get the MAC address
  512. * @dd: the infinipath device
  513. * @mac: the MAC is put here
  514. *
  515. * This is the EUI-64 OUI octets (top 3), then
  516. * skip the next 2 (which should both be zero or 0xff).
  517. * The returned MAC is in network order;
  518. * mac must point to at least 6 bytes of buffer.
  519. * We assume that by the time the LID is set, that the GUID is as valid
  520. * as it's ever going to be, rather than adding yet another status bit.
  521. */
  522. int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
  523. {
  524. u8 *guid;
  525. guid = (u8 *) &dd->ipath_guid;
  526. mac[0] = guid[0];
  527. mac[1] = guid[1];
  528. mac[2] = guid[2];
  529. mac[3] = guid[5];
  530. mac[4] = guid[6];
  531. mac[5] = guid[7];
  532. if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
  533. ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
  534. "%x %x\n", guid[3], guid[4]);
  535. return 0;
  536. }
  537. EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
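/*
 * Worked example (editor's sketch, hypothetical GUID value): the EUI-64
 * GUID 00:12:34:ff:ff:ab:cd:ef yields the MAC 00:12:34:ab:cd:ef -- bytes
 * 3 and 4 are the 0xff/0xff (or 0x00/0x00) filler inserted when a 48-bit
 * MAC is expanded to 64 bits, so they are skipped.
 */
static void example_guid_to_mac(void)
{
	u8 guid[8] = { 0x00, 0x12, 0x34, 0xff, 0xff, 0xab, 0xcd, 0xef };
	u8 mac[6] = { guid[0], guid[1], guid[2], guid[5], guid[6], guid[7] };

	(void)mac;	/* 00:12:34:ab:cd:ef, as ipath_layer_get_mac() returns */
}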
  538. u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
  539. {
  540. return dd->ipath_mlid;
  541. }
  542. EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
  543. u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
  544. {
  545. return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
  546. }
  547. EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
  548. static void update_sge(struct ipath_sge_state *ss, u32 length)
  549. {
  550. struct ipath_sge *sge = &ss->sge;
  551. sge->vaddr += length;
  552. sge->length -= length;
  553. sge->sge_length -= length;
  554. if (sge->sge_length == 0) {
  555. if (--ss->num_sge)
  556. *sge = *ss->sg_list++;
  557. } else if (sge->length == 0 && sge->mr != NULL) {
  558. if (++sge->n >= IPATH_SEGSZ) {
  559. if (++sge->m >= sge->mr->mapsz)
  560. return;
  561. sge->n = 0;
  562. }
  563. sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
  564. sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
  565. }
  566. }
  567. #ifdef __LITTLE_ENDIAN
  568. static inline u32 get_upper_bits(u32 data, u32 shift)
  569. {
  570. return data >> shift;
  571. }
  572. static inline u32 set_upper_bits(u32 data, u32 shift)
  573. {
  574. return data << shift;
  575. }
  576. static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
  577. {
  578. data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
  579. data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
  580. return data;
  581. }
  582. #else
  583. static inline u32 get_upper_bits(u32 data, u32 shift)
  584. {
  585. return data << shift;
  586. }
  587. static inline u32 set_upper_bits(u32 data, u32 shift)
  588. {
  589. return data >> shift;
  590. }
  591. static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
  592. {
  593. data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
  594. data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
  595. return data;
  596. }
  597. #endif
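/*
 * Worked example (editor's sketch, little-endian case): with the four
 * bytes 11 22 33 44 in memory, i.e. data = 0x44332211,
 *
 *	get_upper_bits(data, 8)       == 0x00443322  (drop the low byte)
 *	set_upper_bits(0x3322, 16)    == 0x33220000  (place at byte offset 2)
 *	clear_upper_bytes(data, 2, 1) == 0x00221100  (keep 2 bytes at offset 1)
 *
 * copy_io() below uses these to splice unaligned source bytes into the
 * 32-bit words it writes to the PIO buffer.
 */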
  598. static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
  599. u32 length)
  600. {
  601. u32 extra = 0;
  602. u32 data = 0;
  603. u32 last;
  604. while (1) {
  605. u32 len = ss->sge.length;
  606. u32 off;
  607. BUG_ON(len == 0);
  608. if (len > length)
  609. len = length;
  610. if (len > ss->sge.sge_length)
  611. len = ss->sge.sge_length;
  612. /* If the source address is not aligned, try to align it. */
  613. off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
  614. if (off) {
  615. u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
  616. ~(sizeof(u32) - 1));
  617. u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
  618. u32 y;
  619. y = sizeof(u32) - off;
  620. if (len > y)
  621. len = y;
  622. if (len + extra >= sizeof(u32)) {
  623. data |= set_upper_bits(v, extra *
  624. BITS_PER_BYTE);
  625. len = sizeof(u32) - extra;
  626. if (len == length) {
  627. last = data;
  628. break;
  629. }
  630. __raw_writel(data, piobuf);
  631. piobuf++;
  632. extra = 0;
  633. data = 0;
  634. } else {
  635. /* Clear unused upper bytes */
  636. data |= clear_upper_bytes(v, len, extra);
  637. if (len == length) {
  638. last = data;
  639. break;
  640. }
  641. extra += len;
  642. }
  643. } else if (extra) {
  644. /* Source address is aligned. */
  645. u32 *addr = (u32 *) ss->sge.vaddr;
  646. int shift = extra * BITS_PER_BYTE;
  647. int ushift = 32 - shift;
  648. u32 l = len;
  649. while (l >= sizeof(u32)) {
  650. u32 v = *addr;
  651. data |= set_upper_bits(v, shift);
  652. __raw_writel(data, piobuf);
  653. data = get_upper_bits(v, ushift);
  654. piobuf++;
  655. addr++;
  656. l -= sizeof(u32);
  657. }
  658. /*
  659. * We still have 'l' source bytes left over (plus the 'extra' carry bytes in data).
  660. */
  661. if (l) {
  662. u32 v = *addr;
  663. if (l + extra >= sizeof(u32)) {
  664. data |= set_upper_bits(v, shift);
  665. len -= l + extra - sizeof(u32);
  666. if (len == length) {
  667. last = data;
  668. break;
  669. }
  670. __raw_writel(data, piobuf);
  671. piobuf++;
  672. extra = 0;
  673. data = 0;
  674. } else {
  675. /* Clear unused upper bytes */
  676. data |= clear_upper_bytes(v, l,
  677. extra);
  678. if (len == length) {
  679. last = data;
  680. break;
  681. }
  682. extra += l;
  683. }
  684. } else if (len == length) {
  685. last = data;
  686. break;
  687. }
  688. } else if (len == length) {
  689. u32 w;
  690. /*
  691. * Need to round up for the last dword in the
  692. * packet.
  693. */
  694. w = (len + 3) >> 2;
  695. __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
  696. piobuf += w - 1;
  697. last = ((u32 *) ss->sge.vaddr)[w - 1];
  698. break;
  699. } else {
  700. u32 w = len >> 2;
  701. __iowrite32_copy(piobuf, ss->sge.vaddr, w);
  702. piobuf += w;
  703. extra = len & (sizeof(u32) - 1);
  704. if (extra) {
  705. u32 v = ((u32 *) ss->sge.vaddr)[w];
  706. /* Clear unused upper bytes */
  707. data = clear_upper_bytes(v, extra, 0);
  708. }
  709. }
  710. update_sge(ss, len);
  711. length -= len;
  712. }
  713. /* Update address before sending packet. */
  714. update_sge(ss, length);
  715. /* must flush early everything before trigger word */
  716. ipath_flush_wc();
  717. __raw_writel(last, piobuf);
  718. /* be sure trigger word is written */
  719. ipath_flush_wc();
  720. }
  721. /**
  722. * ipath_verbs_send - send a packet from the verbs layer
  723. * @dd: the infinipath device
  724. * @hdrwords: the number of words in the header
  725. * @hdr: the packet header
  726. * @len: the length of the packet in bytes
  727. * @ss: the SGE to send
  728. *
  729. * This is like ipath_sma_send_pkt() in that we need to be able to send
  730. * packets after the chip is initialized (MADs) but also like
  731. * ipath_layer_send_hdr() since it's used by the verbs layer.
  732. */
  733. int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
  734. u32 *hdr, u32 len, struct ipath_sge_state *ss)
  735. {
  736. u32 __iomem *piobuf;
  737. u32 plen;
  738. int ret;
  739. /* +1 is for the qword padding of pbc */
  740. plen = hdrwords + ((len + 3) >> 2) + 1;
  741. if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
  742. ipath_dbg("packet len 0x%x too long, failing\n", plen);
  743. ret = -EINVAL;
  744. goto bail;
  745. }
  746. /* Get a PIO buffer to use. */
  747. piobuf = ipath_getpiobuf(dd, NULL);
  748. if (unlikely(piobuf == NULL)) {
  749. ret = -EBUSY;
  750. goto bail;
  751. }
  752. /*
  753. * Write len to control qword, no flags.
  754. * We have to flush after the PBC for correctness on some CPUs,
  755. * or the WC buffer can be written out of order.
  756. */
  757. writeq(plen, piobuf);
  758. ipath_flush_wc();
  759. piobuf += 2;
  760. if (len == 0) {
  761. /*
  762. * If there is just the header portion, must flush before
  763. * writing last word of header for correctness, and after
  764. * the last header word (trigger word).
  765. */
  766. __iowrite32_copy(piobuf, hdr, hdrwords - 1);
  767. ipath_flush_wc();
  768. __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
  769. ipath_flush_wc();
  770. ret = 0;
  771. goto bail;
  772. }
  773. __iowrite32_copy(piobuf, hdr, hdrwords);
  774. piobuf += hdrwords;
  775. /* The common case is aligned and contained in one segment. */
  776. if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
  777. !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
  778. u32 w;
  779. u32 *addr = (u32 *) ss->sge.vaddr;
  780. /* Update address before sending packet. */
  781. update_sge(ss, len);
  782. /* Need to round up for the last dword in the packet. */
  783. w = (len + 3) >> 2;
  784. __iowrite32_copy(piobuf, addr, w - 1);
  785. /* must flush early everything before trigger word */
  786. ipath_flush_wc();
  787. __raw_writel(addr[w - 1], piobuf + w - 1);
  788. /* be sure trigger word is written */
  789. ipath_flush_wc();
  790. ret = 0;
  791. goto bail;
  792. }
  793. copy_io(piobuf, ss, len);
  794. ret = 0;
  795. bail:
  796. return ret;
  797. }
  798. EXPORT_SYMBOL_GPL(ipath_verbs_send);
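/*
 * Worked example (editor's sketch): a 10-word (40-byte) header with a
 * 1024-byte payload gives plen = 10 + ((1024 + 3) >> 2) + 1 = 267 words;
 * the extra word is the PBC's qword padding.  267 << 2 = 1068 bytes,
 * which must not exceed ipath_ibmaxlen or the send fails with -EINVAL.
 */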
  799. int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
  800. u64 *rwords, u64 *spkts, u64 *rpkts,
  801. u64 *xmit_wait)
  802. {
  803. int ret;
  804. if (!(dd->ipath_flags & IPATH_INITTED)) {
  805. /* no hardware, freeze, etc. */
  806. ipath_dbg("unit %u not usable\n", dd->ipath_unit);
  807. ret = -EINVAL;
  808. goto bail;
  809. }
  810. *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
  811. *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
  812. *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
  813. *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
  814. *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
  815. ret = 0;
  816. bail:
  817. return ret;
  818. }
  819. EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
  820. /**
  821. * ipath_layer_get_counters - get various chip counters
  822. * @dd: the infinipath device
  823. * @cntrs: counters are placed here
  824. *
  825. * Return the counters needed by recv_pma_get_portcounters().
  826. */
  827. int ipath_layer_get_counters(struct ipath_devdata *dd,
  828. struct ipath_layer_counters *cntrs)
  829. {
  830. int ret;
  831. if (!(dd->ipath_flags & IPATH_INITTED)) {
  832. /* no hardware, freeze, etc. */
  833. ipath_dbg("unit %u not usable\n", dd->ipath_unit);
  834. ret = -EINVAL;
  835. goto bail;
  836. }
  837. cntrs->symbol_error_counter =
  838. ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
  839. cntrs->link_error_recovery_counter =
  840. ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
  841. cntrs->link_downed_counter =
  842. ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
  843. cntrs->port_rcv_errors =
  844. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
  845. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
  846. ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
  847. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
  848. ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
  849. ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
  850. ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
  851. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
  852. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
  853. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
  854. ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
  855. cntrs->port_rcv_remphys_errors =
  856. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
  857. cntrs->port_xmit_discards =
  858. ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
  859. cntrs->port_xmit_data =
  860. ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
  861. cntrs->port_rcv_data =
  862. ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
  863. cntrs->port_xmit_packets =
  864. ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
  865. cntrs->port_rcv_packets =
  866. ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
  867. ret = 0;
  868. bail:
  869. return ret;
  870. }
  871. EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
  872. int ipath_layer_want_buffer(struct ipath_devdata *dd)
  873. {
  874. set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
  875. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
  876. dd->ipath_sendctrl);
  877. return 0;
  878. }
  879. EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
  880. int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
  881. {
  882. int ret = 0;
  883. u32 __iomem *piobuf;
  884. u32 plen, *uhdr;
  885. size_t count;
  886. __be16 vlsllnh;
  887. if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
  888. ipath_dbg("send while not open\n");
  889. ret = -EINVAL;
  890. } else
  891. if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
  892. dd->ipath_lid == 0) {
  893. /*
  894. * the lid check is for the case where the sma hasn't yet configured it
  895. */
  896. ret = -ENETDOWN;
  897. ipath_cdbg(VERBOSE, "send while not ready, "
  898. "mylid=%u, flags=0x%x\n",
  899. dd->ipath_lid, dd->ipath_flags);
  900. }
  901. vlsllnh = *((__be16 *) hdr);
  902. if (vlsllnh != htons(IPS_LRH_BTH)) {
  903. ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
  904. "not sending\n", be16_to_cpu(vlsllnh),
  905. IPS_LRH_BTH);
  906. ret = -EINVAL;
  907. }
  908. if (ret)
  909. goto done;
  910. /* Get a PIO buffer to use. */
  911. piobuf = ipath_getpiobuf(dd, NULL);
  912. if (piobuf == NULL) {
  913. ret = -EBUSY;
  914. goto done;
  915. }
  916. plen = (sizeof(*hdr) >> 2); /* actual length */
  917. ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
  918. writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
  919. ipath_flush_wc();
  920. piobuf += 2;
  921. uhdr = (u32 *)hdr;
  922. count = plen-1; /* amount we can copy before trigger word */
  923. __iowrite32_copy(piobuf, uhdr, count);
  924. ipath_flush_wc();
  925. __raw_writel(uhdr[count], piobuf + count);
  926. ipath_flush_wc(); /* ensure it's sent, now */
  927. ipath_stats.sps_ether_spkts++; /* ether packet sent */
  928. done:
  929. return ret;
  930. }
  931. EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
  932. int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
  933. {
  934. set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
  935. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
  936. dd->ipath_sendctrl);
  937. return 0;
  938. }
  939. EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
  940. int ipath_layer_enable_timer(struct ipath_devdata *dd)
  941. {
  942. /*
  943. * HT-400 has a design flaw where the chip's and the kernel's ideas
  944. * of the tail register don't always agree, and therefore we won't
  945. * get an interrupt on the next packet received.
  946. * If the board supports per packet receive interrupts, use it.
  947. * Otherwise, the timer function periodically checks for packets
  948. * to cover this case.
  949. * Either way, the timer is needed for verbs layer related
  950. * processing.
  951. */
  952. if (dd->ipath_flags & IPATH_GPIO_INTR) {
  953. ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
  954. 0x2074076542310ULL);
  955. /* Enable GPIO bit 2 interrupt */
  956. ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
  957. (u64) (1 << 2));
  958. }
  959. init_timer(&dd->verbs_layer.l_timer);
  960. dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
  961. dd->verbs_layer.l_timer.data = (unsigned long)dd;
  962. dd->verbs_layer.l_timer.expires = jiffies + 1;
  963. add_timer(&dd->verbs_layer.l_timer);
  964. return 0;
  965. }
  966. EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
  967. int ipath_layer_disable_timer(struct ipath_devdata *dd)
  968. {
  969. /* Disable GPIO bit 2 interrupt */
  970. if (dd->ipath_flags & IPATH_GPIO_INTR)
  971. ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
  972. del_timer_sync(&dd->verbs_layer.l_timer);
  973. return 0;
  974. }
  975. EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
  976. /**
  977. * ipath_layer_set_verbs_flags - set the verbs layer flags
  978. * @dd: the infinipath device
  979. * @flags: the flags to set
  980. */
  981. int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
  982. {
  983. struct ipath_devdata *ss;
  984. unsigned long lflags;
  985. spin_lock_irqsave(&ipath_devs_lock, lflags);
  986. list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
  987. if (!(ss->ipath_flags & IPATH_INITTED))
  988. continue;
  989. if ((flags & IPATH_VERBS_KERNEL_SMA) &&
  990. !(*ss->ipath_statusp & IPATH_STATUS_SMA))
  991. *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
  992. else
  993. *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
  994. }
  995. spin_unlock_irqrestore(&ipath_devs_lock, lflags);
  996. return 0;
  997. }
  998. EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
  999. /**
  1000. * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
  1001. * @dd: the infinipath device
  1002. */
  1003. unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
  1004. {
  1005. return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
  1006. }
  1007. EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
  1008. /**
  1009. * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
  1010. * @dd: the infinipath device
  1011. * @index: the PKEY index
  1012. */
  1013. unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
  1014. {
  1015. unsigned ret;
  1016. if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
  1017. ret = 0;
  1018. else
  1019. ret = dd->ipath_pd[0]->port_pkeys[index];
  1020. return ret;
  1021. }
  1022. EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
  1023. /**
  1024. * ipath_layer_get_pkeys - return the PKEY table for port 0
  1025. * @dd: the infinipath device
  1026. * @pkeys: the pkey table is placed here
  1027. */
  1028. int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
  1029. {
  1030. struct ipath_portdata *pd = dd->ipath_pd[0];
  1031. memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
  1032. return 0;
  1033. }
  1034. EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
  1035. /**
  1036. * rm_pkey - decrement the reference count for the given PKEY
  1037. * @dd: the infinipath device
  1038. * @key: the PKEY index
  1039. *
  1040. * Return true if this was the last reference and the hardware table entry
  1041. * needs to be changed.
  1042. */
  1043. static int rm_pkey(struct ipath_devdata *dd, u16 key)
  1044. {
  1045. int i;
  1046. int ret;
  1047. for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
  1048. if (dd->ipath_pkeys[i] != key)
  1049. continue;
  1050. if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
  1051. dd->ipath_pkeys[i] = 0;
  1052. ret = 1;
  1053. goto bail;
  1054. }
  1055. break;
  1056. }
  1057. ret = 0;
  1058. bail:
  1059. return ret;
  1060. }
  1061. /**
  1062. * add_pkey - add the given PKEY to the hardware table
  1063. * @dd: the infinipath device
  1064. * @key: the PKEY
  1065. *
  1066. * Return an error code if unable to add the entry, zero if no change,
  1067. * or 1 if the hardware PKEY register needs to be updated.
  1068. */
  1069. static int add_pkey(struct ipath_devdata *dd, u16 key)
  1070. {
  1071. int i;
  1072. u16 lkey = key & 0x7FFF;
  1073. int any = 0;
  1074. int ret;
  1075. if (lkey == 0x7FFF) {
  1076. ret = 0;
  1077. goto bail;
  1078. }
  1079. /* Look for an empty slot or a matching PKEY. */
  1080. for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
  1081. if (!dd->ipath_pkeys[i]) {
  1082. any++;
  1083. continue;
  1084. }
  1085. /* If it matches exactly, try to increment the ref count */
  1086. if (dd->ipath_pkeys[i] == key) {
  1087. if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
  1088. ret = 0;
  1089. goto bail;
  1090. }
  1091. /* Lost the race. Look for an empty slot below. */
  1092. atomic_dec(&dd->ipath_pkeyrefs[i]);
  1093. any++;
  1094. }
  1095. /*
  1096. * It makes no sense to have both the limited and unlimited
  1097. * PKEY set at the same time since the unlimited one will
  1098. * disable the limited one.
  1099. */
  1100. if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
  1101. ret = -EEXIST;
  1102. goto bail;
  1103. }
  1104. }
  1105. if (!any) {
  1106. ret = -EBUSY;
  1107. goto bail;
  1108. }
  1109. for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
  1110. if (!dd->ipath_pkeys[i] &&
  1111. atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
  1112. /* for ipathstats, etc. */
  1113. ipath_stats.sps_pkeys[i] = lkey;
  1114. dd->ipath_pkeys[i] = key;
  1115. ret = 1;
  1116. goto bail;
  1117. }
  1118. }
  1119. ret = -EBUSY;
  1120. bail:
  1121. return ret;
  1122. }
  1123. /**
  1124. * ipath_layer_set_pkeys - set the PKEY table for port 0
  1125. * @dd: the infinipath device
  1126. * @pkeys: the PKEY table
  1127. */
  1128. int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
  1129. {
  1130. struct ipath_portdata *pd;
  1131. int i;
  1132. int changed = 0;
  1133. pd = dd->ipath_pd[0];
  1134. for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
  1135. u16 key = pkeys[i];
  1136. u16 okey = pd->port_pkeys[i];
  1137. if (key == okey)
  1138. continue;
  1139. /*
  1140. * The value of this PKEY table entry is changing.
  1141. * Remove the old entry in the hardware's array of PKEYs.
  1142. */
  1143. if (okey & 0x7FFF)
  1144. changed |= rm_pkey(dd, okey);
  1145. if (key & 0x7FFF) {
  1146. int ret = add_pkey(dd, key);
  1147. if (ret < 0)
  1148. key = 0;
  1149. else
  1150. changed |= ret;
  1151. }
  1152. pd->port_pkeys[i] = key;
  1153. }
  1154. if (changed) {
  1155. u64 pkey;
  1156. pkey = (u64) dd->ipath_pkeys[0] |
  1157. ((u64) dd->ipath_pkeys[1] << 16) |
  1158. ((u64) dd->ipath_pkeys[2] << 32) |
  1159. ((u64) dd->ipath_pkeys[3] << 48);
  1160. ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
  1161. (unsigned long long) pkey);
  1162. ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
  1163. pkey);
  1164. }
  1165. return 0;
  1166. }
  1167. EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
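/*
 * Worked example (editor's sketch): with ipath_pkeys[] = { 0xffff, 0x8001,
 * 0x7fff, 0x0000 } the 64-bit value written to kr_partitionkey above is
 * 0x00007fff8001ffff -- entry 0 in bits 15:0, entry 1 in bits 31:16, and
 * so on.  Bit 15 of each entry is the full-membership bit, which is why
 * add_pkey() masks with 0x7FFF when checking for a conflicting
 * limited/full entry.
 */
static u64 example_pack_pkeys(const u16 *pkeys)
{
	/* mirrors the packing done in ipath_layer_set_pkeys() above */
	return (u64) pkeys[0] |
		((u64) pkeys[1] << 16) |
		((u64) pkeys[2] << 32) |
		((u64) pkeys[3] << 48);
}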
  1168. /**
  1169. * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
  1170. * @dd: the infinipath device
  1171. *
  1172. * Returns zero if the default is POLL, 1 if the default is SLEEP.
  1173. */
  1174. int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
  1175. {
  1176. return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
  1177. }
  1178. EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
  1179. /**
  1180. * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
  1181. * @dd: the infinipath device
  1182. * @sleep: the new state
  1183. *
  1184. * Note that this will only take effect when the link state changes.
  1185. */
  1186. int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
  1187. int sleep)
  1188. {
  1189. if (sleep)
  1190. dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
  1191. else
  1192. dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
  1193. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1194. dd->ipath_ibcctrl);
  1195. return 0;
  1196. }
  1197. EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
  1198. int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
  1199. {
  1200. return (dd->ipath_ibcctrl >>
  1201. INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
  1202. INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
  1203. }
  1204. EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
  1205. /**
  1206. * ipath_layer_set_phyerrthreshold - set the physical error threshold
  1207. * @dd: the infinipath device
  1208. * @n: the new threshold
  1209. *
  1210. * Note that this will only take effect when the link state changes.
  1211. */
  1212. int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
  1213. {
  1214. unsigned v;
  1215. v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
  1216. INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
  1217. if (v != n) {
  1218. dd->ipath_ibcctrl &=
  1219. ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
  1220. INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
  1221. dd->ipath_ibcctrl |=
  1222. (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
  1223. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1224. dd->ipath_ibcctrl);
  1225. }
  1226. return 0;
  1227. }
  1228. EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
  1229. int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
  1230. {
  1231. return (dd->ipath_ibcctrl >>
  1232. INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
  1233. INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
  1234. }
  1235. EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
  1236. /**
  1237. * ipath_layer_set_overrunthreshold - set the overrun threshold
  1238. * @dd: the infinipath device
  1239. * @n: the new threshold
  1240. *
  1241. * Note that this will only take effect when the link state changes.
  1242. */
  1243. int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
  1244. {
  1245. unsigned v;
  1246. v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
  1247. INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
  1248. if (v != n) {
  1249. dd->ipath_ibcctrl &=
  1250. ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
  1251. INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
  1252. dd->ipath_ibcctrl |=
  1253. (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
  1254. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1255. dd->ipath_ibcctrl);
  1256. }
  1257. return 0;
  1258. }
  1259. EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
  1260. int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
  1261. size_t namelen)
  1262. {
  1263. return dd->ipath_f_get_boardname(dd, name, namelen);
  1264. }
  1265. EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
  1266. u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
  1267. {
  1268. return dd->ipath_rcvhdrentsize;
  1269. }
  1270. EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);