/* drivers/infiniband/hw/ipath/ipath_layer.c */
  1. /*
  2. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. /*
  33. * These are the routines used by layered drivers, currently just the
  34. * layered ethernet driver and verbs layer.
  35. */
  36. #include <linux/io.h>
  37. #include <linux/pci.h>
  38. #include <asm/byteorder.h>
  39. #include "ipath_kernel.h"
  40. #include "ips_common.h"
  41. #include "ipath_layer.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

/* Opcode the layered (ethernet) driver claims; matched in the rcv path. */
u16 ipath_layer_rcv_opcode;

/* Callbacks installed by ipath_layer_register(), NULL when unregistered. */
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);

/* Callbacks installed by ipath_verbs_register(), NULL when unregistered. */
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

/* Nonzero once ipath_verbs_register() has completed. */
static int ipath_verbs_registered;

/* Per-device attach/detach hooks for the two layers. */
static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);

/* Periodic timeout hook for the verbs layer (see __ipath_verbs_timer()). */
static void (*verbs_timer_cb)(void *);
  56. int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
  57. {
  58. int ret = -ENODEV;
  59. if (dd->ipath_layer.l_arg && layer_intr)
  60. ret = layer_intr(dd->ipath_layer.l_arg, arg);
  61. return ret;
  62. }
  63. int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
  64. {
  65. int ret;
  66. mutex_lock(&ipath_layer_mutex);
  67. ret = __ipath_layer_intr(dd, arg);
  68. mutex_unlock(&ipath_layer_mutex);
  69. return ret;
  70. }
  71. int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
  72. struct sk_buff *skb)
  73. {
  74. int ret = -ENODEV;
  75. if (dd->ipath_layer.l_arg && layer_rcv)
  76. ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
  77. return ret;
  78. }
  79. int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
  80. {
  81. int ret = -ENODEV;
  82. if (dd->ipath_layer.l_arg && layer_rcv_lid)
  83. ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
  84. return ret;
  85. }
  86. int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
  87. {
  88. int ret = -ENODEV;
  89. if (dd->verbs_layer.l_arg && verbs_piobufavail)
  90. ret = verbs_piobufavail(dd->verbs_layer.l_arg);
  91. return ret;
  92. }
  93. int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
  94. u32 tlen)
  95. {
  96. int ret = -ENODEV;
  97. if (dd->verbs_layer.l_arg && verbs_rcv) {
  98. verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
  99. ret = 0;
  100. }
  101. return ret;
  102. }
/*
 * ipath_layer_set_linkstate - request an IB link state transition
 * @dd: the infinipath device
 * @newstate: one of the IPATH_IB_LINK* request codes
 *
 * The three LINKDOWN variants just issue the IBC command and return
 * immediately; INIT/ARM/ACTIVE also wait (up to 2000 msec) for the
 * link to actually reach the requested state.  Returns 0 on success,
 * -EINVAL for an invalid request or unmet precondition, or the result
 * of ipath_wait_linkstate().
 */
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		/* already there; nothing to do */
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		/* already there; nothing to do */
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		/* can only arm from INIT or ACTIVE */
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		/* already there; nothing to do */
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		/* can only go active from ARMED */
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size. For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max. It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize). We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		/* MTU (+ header room) now bounds the max send length */
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* refresh the TID template to match the new max length */
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
  253. int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
  254. {
  255. ipath_stats.sps_lid[dd->ipath_unit] = arg;
  256. dd->ipath_lid = arg;
  257. dd->ipath_lmc = lmc;
  258. mutex_lock(&ipath_layer_mutex);
  259. if (dd->ipath_layer.l_arg && layer_intr)
  260. layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
  261. mutex_unlock(&ipath_layer_mutex);
  262. return 0;
  263. }
  264. EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
  265. int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
  266. {
  267. /* XXX - need to inform anyone who cares this just happened. */
  268. dd->ipath_guid = guid;
  269. return 0;
  270. }
  271. EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
  272. __be64 ipath_layer_get_guid(struct ipath_devdata *dd)
  273. {
  274. return dd->ipath_guid;
  275. }
  276. EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
  277. u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
  278. {
  279. return dd->ipath_nguid;
  280. }
  281. EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
  282. int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
  283. u32 * boardrev, u32 * majrev, u32 * minrev)
  284. {
  285. *vendor = dd->ipath_vendorid;
  286. *boardrev = dd->ipath_boardrev;
  287. *majrev = dd->ipath_majrev;
  288. *minrev = dd->ipath_minrev;
  289. return 0;
  290. }
  291. EXPORT_SYMBOL_GPL(ipath_layer_query_device);
  292. u32 ipath_layer_get_flags(struct ipath_devdata *dd)
  293. {
  294. return dd->ipath_flags;
  295. }
  296. EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
  297. struct device *ipath_layer_get_device(struct ipath_devdata *dd)
  298. {
  299. return &dd->pcidev->dev;
  300. }
  301. EXPORT_SYMBOL_GPL(ipath_layer_get_device);
  302. u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
  303. {
  304. return dd->ipath_deviceid;
  305. }
  306. EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
  307. u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
  308. {
  309. return dd->ipath_lastibcstat;
  310. }
  311. EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
  312. u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
  313. {
  314. return dd->ipath_ibmtu;
  315. }
  316. EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
  317. void ipath_layer_add(struct ipath_devdata *dd)
  318. {
  319. mutex_lock(&ipath_layer_mutex);
  320. if (layer_add_one)
  321. dd->ipath_layer.l_arg =
  322. layer_add_one(dd->ipath_unit, dd);
  323. if (verbs_add_one)
  324. dd->verbs_layer.l_arg =
  325. verbs_add_one(dd->ipath_unit, dd);
  326. mutex_unlock(&ipath_layer_mutex);
  327. }
  328. void ipath_layer_del(struct ipath_devdata *dd)
  329. {
  330. mutex_lock(&ipath_layer_mutex);
  331. if (dd->ipath_layer.l_arg && layer_remove_one) {
  332. layer_remove_one(dd->ipath_layer.l_arg);
  333. dd->ipath_layer.l_arg = NULL;
  334. }
  335. if (dd->verbs_layer.l_arg && verbs_remove_one) {
  336. verbs_remove_one(dd->verbs_layer.l_arg);
  337. dd->verbs_layer.l_arg = NULL;
  338. }
  339. mutex_unlock(&ipath_layer_mutex);
  340. }
/*
 * ipath_layer_register - register the layered (ethernet) driver
 *
 * Install the layer callbacks, then attach every already-initialized
 * unit by calling @l_add for it.  ipath_devs_lock is dropped around
 * each l_add call (it may sleep); the list walk uses the _safe variant
 * so a concurrent removal doesn't break iteration.  Always returns 0.
 */
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		/* already attached (e.g. by ipath_layer_add()) */
		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ipath_layer_register);
/*
 * ipath_layer_unregister - unregister the layered (ethernet) driver
 *
 * Detach every unit via the remove hook (dropping ipath_devs_lock
 * around the call, which may sleep), then clear all layer callbacks
 * under ipath_layer_mutex.
 */
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}
EXPORT_SYMBOL_GPL(ipath_layer_unregister);
/*
 * Per-device 1-jiffy periodic timer for the verbs layer: polls the
 * receive queue when port 0 interrupts may be missed, runs the verbs
 * timeout callback, then re-arms itself.
 */
static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	/* re-arm for the next jiffy */
	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}
  411. /**
  412. * ipath_verbs_register - verbs layer registration
  413. * @l_piobufavail: callback for when PIO buffers become available
  414. * @l_rcv: callback for receiving a packet
  415. * @l_timer_cb: timer callback
  416. * @ipath_devdata: device data structure is put here
  417. */
  418. int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
  419. void (*l_remove)(void *arg),
  420. int (*l_piobufavail) (void *arg),
  421. void (*l_rcv) (void *arg, void *rhdr,
  422. void *data, u32 tlen),
  423. void (*l_timer_cb) (void *arg))
  424. {
  425. struct ipath_devdata *dd, *tmp;
  426. unsigned long flags;
  427. mutex_lock(&ipath_layer_mutex);
  428. verbs_add_one = l_add;
  429. verbs_remove_one = l_remove;
  430. verbs_piobufavail = l_piobufavail;
  431. verbs_rcv = l_rcv;
  432. verbs_timer_cb = l_timer_cb;
  433. spin_lock_irqsave(&ipath_devs_lock, flags);
  434. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  435. if (!(dd->ipath_flags & IPATH_INITTED))
  436. continue;
  437. if (dd->verbs_layer.l_arg)
  438. continue;
  439. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  440. dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
  441. spin_lock_irqsave(&ipath_devs_lock, flags);
  442. }
  443. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  444. mutex_unlock(&ipath_layer_mutex);
  445. ipath_verbs_registered = 1;
  446. return 0;
  447. }
  448. EXPORT_SYMBOL_GPL(ipath_verbs_register);
  449. void ipath_verbs_unregister(void)
  450. {
  451. struct ipath_devdata *dd, *tmp;
  452. unsigned long flags;
  453. mutex_lock(&ipath_layer_mutex);
  454. spin_lock_irqsave(&ipath_devs_lock, flags);
  455. list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
  456. *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
  457. if (dd->verbs_layer.l_arg && verbs_remove_one) {
  458. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  459. verbs_remove_one(dd->verbs_layer.l_arg);
  460. spin_lock_irqsave(&ipath_devs_lock, flags);
  461. dd->verbs_layer.l_arg = NULL;
  462. }
  463. }
  464. spin_unlock_irqrestore(&ipath_devs_lock, flags);
  465. verbs_add_one = NULL;
  466. verbs_remove_one = NULL;
  467. verbs_piobufavail = NULL;
  468. verbs_rcv = NULL;
  469. verbs_timer_cb = NULL;
  470. mutex_unlock(&ipath_layer_mutex);
  471. }
  472. EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
  473. int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
  474. {
  475. int ret;
  476. u32 intval = 0;
  477. mutex_lock(&ipath_layer_mutex);
  478. if (!dd->ipath_layer.l_arg) {
  479. ret = -EINVAL;
  480. goto bail;
  481. }
  482. ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
  483. if (ret < 0)
  484. goto bail;
  485. *pktmax = dd->ipath_ibmaxlen;
  486. if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
  487. intval |= IPATH_LAYER_INT_IF_UP;
  488. if (ipath_stats.sps_lid[dd->ipath_unit])
  489. intval |= IPATH_LAYER_INT_LID;
  490. if (ipath_stats.sps_mlid[dd->ipath_unit])
  491. intval |= IPATH_LAYER_INT_BCAST;
  492. /*
  493. * do this on open, in case low level is already up and
  494. * just layered driver was reloaded, etc.
  495. */
  496. if (intval)
  497. layer_intr(dd->ipath_layer.l_arg, intval);
  498. ret = 0;
  499. bail:
  500. mutex_unlock(&ipath_layer_mutex);
  501. return ret;
  502. }
  503. EXPORT_SYMBOL_GPL(ipath_layer_open);
  504. u16 ipath_layer_get_lid(struct ipath_devdata *dd)
  505. {
  506. return dd->ipath_lid;
  507. }
  508. EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
  509. /**
  510. * ipath_layer_get_mac - get the MAC address
  511. * @dd: the infinipath device
  512. * @mac: the MAC is put here
  513. *
  514. * This is the EUID-64 OUI octets (top 3), then
  515. * skip the next 2 (which should both be zero or 0xff).
  516. * The returned MAC is in network order
  517. * mac points to at least 6 bytes of buffer
  518. * We assume that by the time the LID is set, that the GUID is as valid
  519. * as it's ever going to be, rather than adding yet another status bit.
  520. */
  521. int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
  522. {
  523. u8 *guid;
  524. guid = (u8 *) &dd->ipath_guid;
  525. mac[0] = guid[0];
  526. mac[1] = guid[1];
  527. mac[2] = guid[2];
  528. mac[3] = guid[5];
  529. mac[4] = guid[6];
  530. mac[5] = guid[7];
  531. if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
  532. ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
  533. "%x %x\n", guid[3], guid[4]);
  534. return 0;
  535. }
  536. EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
  537. u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
  538. {
  539. return dd->ipath_mlid;
  540. }
  541. EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
  542. u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
  543. {
  544. return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
  545. }
  546. EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
/*
 * Advance an SGE state by @length bytes that were just consumed from
 * the current SGE.  @length must not exceed both sge.length and
 * sge.sge_length (the caller clamps it; see copy_io()).
 */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		/* SGE fully consumed: step to the next one, if any. */
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		/*
		 * End of the current mapped segment within a memory
		 * region: move to the next segment (wrapping to the
		 * next map page after IPATH_SEGSZ entries).
		 */
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
/*
 * Byte-lane helpers used by copy_io() to assemble unaligned source
 * bytes into 32-bit words before writing them to the PIO buffer.
 * "Upper" means the bytes that come later in memory, so the shift
 * direction flips with host endianness.
 */
#ifdef __LITTLE_ENDIAN
/* Return the bytes of @data above bit position @shift, moved to bit 0. */
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

/* Position @data so it lands above bit position @shift. */
static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

/* Keep the low @n bytes of @data, shifted up @off bytes; zero the rest. */
static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
/* Big-endian mirror images of the helpers above. */
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
/*
 * Copy @length bytes described by the SGE list @ss into the PIO
 * buffer @piobuf, coping with arbitrarily aligned source addresses by
 * accumulating partial words in @data/@extra.  The final word is held
 * back in @last and written after a write-combining flush, because
 * writing the last ("trigger") word is what launches the packet.
 * @length must be nonzero and no larger than the SGE list's total.
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;	/* number of partial bytes accumulated in data */
	u32 data = 0;	/* partially assembled output word */
	u32 last;	/* trigger word, written after the flush below */

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		/* clamp to what remains of the packet and of this SGE */
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			/* read the containing aligned word, keep the
			 * bytes at and above vaddr */
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			/* at most up to the next word boundary */
			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				/* fills out a whole output word */
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			/* merge each source word with the carried bytes */
			while (l >= sizeof(u32)) {
				u32 v = *addr;
				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;
				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;
			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			/* aligned, no carry: bulk-copy whole words */
			u32 w = len >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;
			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];
				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
	update_sge(ss, length);
}
/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since its used by the verbs layer.
 *
 * Returns 0 on success, -EINVAL if the packet exceeds ibmaxlen, or
 * -EBUSY if no PIO buffer is available.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;

		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
			     piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		update_sge(ss, len);
		ret = 0;
		goto bail;
	}

	/* slow path: unaligned and/or multi-segment payload */
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}
EXPORT_SYMBOL_GPL(ipath_verbs_send);
  796. int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
  797. u64 *rwords, u64 *spkts, u64 *rpkts,
  798. u64 *xmit_wait)
  799. {
  800. int ret;
  801. if (!(dd->ipath_flags & IPATH_INITTED)) {
  802. /* no hardware, freeze, etc. */
  803. ipath_dbg("unit %u not usable\n", dd->ipath_unit);
  804. ret = -EINVAL;
  805. goto bail;
  806. }
  807. *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
  808. *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
  809. *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
  810. *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
  811. *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
  812. ret = 0;
  813. bail:
  814. return ret;
  815. }
  816. EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
  817. /**
  818. * ipath_layer_get_counters - get various chip counters
  819. * @dd: the infinipath device
  820. * @cntrs: counters are placed here
  821. *
  822. * Return the counters needed by recv_pma_get_portcounters().
  823. */
  824. int ipath_layer_get_counters(struct ipath_devdata *dd,
  825. struct ipath_layer_counters *cntrs)
  826. {
  827. int ret;
  828. if (!(dd->ipath_flags & IPATH_INITTED)) {
  829. /* no hardware, freeze, etc. */
  830. ipath_dbg("unit %u not usable\n", dd->ipath_unit);
  831. ret = -EINVAL;
  832. goto bail;
  833. }
  834. cntrs->symbol_error_counter =
  835. ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
  836. cntrs->link_error_recovery_counter =
  837. ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
  838. cntrs->link_downed_counter =
  839. ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
  840. cntrs->port_rcv_errors =
  841. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
  842. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
  843. ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
  844. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
  845. ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
  846. ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
  847. ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
  848. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
  849. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
  850. ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
  851. ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
  852. cntrs->port_rcv_remphys_errors =
  853. ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
  854. cntrs->port_xmit_discards =
  855. ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
  856. cntrs->port_xmit_data =
  857. ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
  858. cntrs->port_rcv_data =
  859. ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
  860. cntrs->port_xmit_packets =
  861. ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
  862. cntrs->port_rcv_packets =
  863. ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
  864. ret = 0;
  865. bail:
  866. return ret;
  867. }
  868. EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
  869. int ipath_layer_want_buffer(struct ipath_devdata *dd)
  870. {
  871. set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
  872. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
  873. dd->ipath_sendctrl);
  874. return 0;
  875. }
  876. EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
/*
 * ipath_layer_send_hdr - send one ether_header-sized packet via a PIO buffer
 * @dd: the infinipath device
 * @hdr: the complete packet to send (LRH first)
 *
 * Returns 0 on success; -EINVAL if the layer isn't open or the LRH type
 * word is not IPS_LRH_BTH; -ENETDOWN if the link isn't up or no LID is
 * configured yet; -EBUSY if no PIO send buffer is free.
 *
 * NOTE(review): the write-combining flushes below appear load-bearing —
 * sibling code in this file flushes before the final "trigger" word so
 * earlier words reach the chip first; preserve the ordering.
 */
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	/* Validate state first; errors accumulate in ret, checked below. */
	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else
		if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		    dd->ipath_lid == 0) {
			/*
			 * lid check is for when sma hasn't yet configured
			 */
			ret = -ENETDOWN;
			ipath_cdbg(VERBOSE, "send while not ready, "
				   "mylid=%u, flags=0x%x\n",
				   dd->ipath_lid, dd->ipath_flags);
		}

	/* First 16 bits of the LRH must carry the expected packet type. */
	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	/* Length in 32-bit words; hdr is copied whole. */
	plen = (sizeof(*hdr) >> 2); /* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
	/* flush so the PBC word can't be reordered after the data words */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen-1; /* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	/* flush before the last word so all earlier words land first */
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc(); /* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++; /* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
  929. int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
  930. {
  931. set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
  932. ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
  933. dd->ipath_sendctrl);
  934. return 0;
  935. }
  936. EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
/*
 * ipath_layer_enable_timer - start the verbs-layer periodic timer
 * @dd: the infinipath device
 *
 * Optionally enables the per-packet GPIO receive interrupt, then arms
 * the verbs-layer timer to fire on the next jiffy.  Always returns 0.
 */
int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		/*
		 * NOTE(review): magic debugport value — presumably a
		 * chip-specific setup word; confirm against the HW docs
		 * before touching.
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	/* Arm the timer to expire on the very next tick. */
	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
  964. int ipath_layer_disable_timer(struct ipath_devdata *dd)
  965. {
  966. /* Disable GPIO bit 2 interrupt */
  967. if (dd->ipath_flags & IPATH_GPIO_INTR)
  968. ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
  969. del_timer_sync(&dd->verbs_layer.l_timer);
  970. return 0;
  971. }
  972. EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
  973. /**
  974. * ipath_layer_set_verbs_flags - set the verbs layer flags
  975. * @dd: the infinipath device
  976. * @flags: the flags to set
  977. */
  978. int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
  979. {
  980. struct ipath_devdata *ss;
  981. unsigned long lflags;
  982. spin_lock_irqsave(&ipath_devs_lock, lflags);
  983. list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
  984. if (!(ss->ipath_flags & IPATH_INITTED))
  985. continue;
  986. if ((flags & IPATH_VERBS_KERNEL_SMA) &&
  987. !(*ss->ipath_statusp & IPATH_STATUS_SMA))
  988. *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
  989. else
  990. *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
  991. }
  992. spin_unlock_irqrestore(&ipath_devs_lock, lflags);
  993. return 0;
  994. }
  995. EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
  996. /**
  997. * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
  998. * @dd: the infinipath device
  999. */
  1000. unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
  1001. {
  1002. return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
  1003. }
  1004. EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
  1005. /**
  1006. * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
  1007. * @dd: the infinipath device
  1008. * @index: the PKEY index
  1009. */
  1010. unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
  1011. {
  1012. unsigned ret;
  1013. if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
  1014. ret = 0;
  1015. else
  1016. ret = dd->ipath_pd[0]->port_pkeys[index];
  1017. return ret;
  1018. }
  1019. EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
  1020. /**
  1021. * ipath_layer_get_pkeys - return the PKEY table for port 0
  1022. * @dd: the infinipath device
  1023. * @pkeys: the pkey table is placed here
  1024. */
  1025. int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
  1026. {
  1027. struct ipath_portdata *pd = dd->ipath_pd[0];
  1028. memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
  1029. return 0;
  1030. }
  1031. EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
  1032. /**
  1033. * rm_pkey - decrecment the reference count for the given PKEY
  1034. * @dd: the infinipath device
  1035. * @key: the PKEY index
  1036. *
  1037. * Return true if this was the last reference and the hardware table entry
  1038. * needs to be changed.
  1039. */
  1040. static int rm_pkey(struct ipath_devdata *dd, u16 key)
  1041. {
  1042. int i;
  1043. int ret;
  1044. for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
  1045. if (dd->ipath_pkeys[i] != key)
  1046. continue;
  1047. if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
  1048. dd->ipath_pkeys[i] = 0;
  1049. ret = 1;
  1050. goto bail;
  1051. }
  1052. break;
  1053. }
  1054. ret = 0;
  1055. bail:
  1056. return ret;
  1057. }
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 *
 * NOTE(review): slot reference counts are taken with atomics, so two
 * callers can race for the same slot; the second pass below retries
 * empty slots after losing such a race.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	/* Low 15 bits: the PKEY base value without the membership bit. */
	u16 lkey = key & 0x7FFF;
	int any = 0;	/* number of empty slots seen in the first pass */
	int ret;

	/* Base value 0x7FFF (key 0x7FFF or 0xFFFF): report "no change". */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				/* Already present; nothing to write. */
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	/* No empty slot was seen: the table is full. */
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	/* Second pass: claim an empty slot (ref 0 -> 1 wins the race). */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			/* Hardware register needs updating. */
			ret = 1;
			goto bail;
		}
	}
	/* Every previously-empty slot was claimed by someone else. */
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 *
 * Reconciles the port's PKEY table with @pkeys via the shared
 * reference-counted shadow table (rm_pkey()/add_pkey()), and rewrites
 * the hardware partition-key register only if a shadow entry actually
 * changed.  Always returns 0.
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;	/* nonzero if the HW register must be rewritten */

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			/* On failure record the entry as empty (0). */
			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		/* Pack the four 16-bit shadow entries into one register. */
		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
  1165. /**
  1166. * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
  1167. * @dd: the infinipath device
  1168. *
  1169. * Returns zero if the default is POLL, 1 if the default is SLEEP.
  1170. */
  1171. int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
  1172. {
  1173. return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
  1174. }
  1175. EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
  1176. /**
  1177. * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
  1178. * @dd: the infinipath device
  1179. * @sleep: the new state
  1180. *
  1181. * Note that this will only take effect when the link state changes.
  1182. */
  1183. int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
  1184. int sleep)
  1185. {
  1186. if (sleep)
  1187. dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
  1188. else
  1189. dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
  1190. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1191. dd->ipath_ibcctrl);
  1192. return 0;
  1193. }
  1194. EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
  1195. int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
  1196. {
  1197. return (dd->ipath_ibcctrl >>
  1198. INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
  1199. INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
  1200. }
  1201. EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
  1202. /**
  1203. * ipath_layer_set_phyerrthreshold - set the physical error threshold
  1204. * @dd: the infinipath device
  1205. * @n: the new threshold
  1206. *
  1207. * Note that this will only take effect when the link state changes.
  1208. */
  1209. int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
  1210. {
  1211. unsigned v;
  1212. v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
  1213. INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
  1214. if (v != n) {
  1215. dd->ipath_ibcctrl &=
  1216. ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
  1217. INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
  1218. dd->ipath_ibcctrl |=
  1219. (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
  1220. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1221. dd->ipath_ibcctrl);
  1222. }
  1223. return 0;
  1224. }
  1225. EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
  1226. int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
  1227. {
  1228. return (dd->ipath_ibcctrl >>
  1229. INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
  1230. INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
  1231. }
  1232. EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
  1233. /**
  1234. * ipath_layer_set_overrunthreshold - set the overrun threshold
  1235. * @dd: the infinipath device
  1236. * @n: the new threshold
  1237. *
  1238. * Note that this will only take effect when the link state changes.
  1239. */
  1240. int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
  1241. {
  1242. unsigned v;
  1243. v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
  1244. INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
  1245. if (v != n) {
  1246. dd->ipath_ibcctrl &=
  1247. ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
  1248. INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
  1249. dd->ipath_ibcctrl |=
  1250. (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
  1251. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
  1252. dd->ipath_ibcctrl);
  1253. }
  1254. return 0;
  1255. }
  1256. EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
  1257. int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
  1258. size_t namelen)
  1259. {
  1260. return dd->ipath_f_get_boardname(dd, name, namelen);
  1261. }
  1262. EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
  1263. u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
  1264. {
  1265. return dd->ipath_rcvhdrentsize;
  1266. }
  1267. EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);