qib_file_ops.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
                             unsigned long, loff_t);
static unsigned int qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

static const struct file_operations qib_file_ops = {
        .owner = THIS_MODULE,
        .write = qib_write,
        .aio_write = qib_aio_write,
        .open = qib_open,
        .release = qib_close,
        .poll = qib_poll,
        .mmap = qib_mmapf,
        .llseek = noop_llseek,
};
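
/*
 * Note (added commentary): this device intentionally has no ioctl()
 * handler; userspace drives the driver through write()/writev() on the
 * device file (see qib_write(), declared above and defined later in
 * this file), which takes a command structure from qib_common.h. The
 * fops above therefore only need write, aio_write, open/release, poll
 * and mmap.
 */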
/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
        struct page *page;
        u64 paddr = 0;

        page = vmalloc_to_page(p);
        if (page)
                paddr = page_to_pfn(page) << PAGE_SHIFT;

        return paddr;
}
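
/*
 * The value returned above is only an opaque cookie: mmap_kvaddr()
 * later compares the offset passed in by userspace against cvt_kvaddr()
 * of the same kernel pointers, so the mapping only has to be stable,
 * not reversible. The fault handler (qib_file_vma_fault, below)
 * recovers pages from the kernel virtual address stashed in vm_pgoff
 * instead.
 */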
static int qib_get_base_info(struct file *fp, void __user *ubase,
                             size_t ubase_size)
{
        struct qib_ctxtdata *rcd = ctxt_fp(fp);
        int ret = 0;
        struct qib_base_info *kinfo = NULL;
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        unsigned subctxt_cnt;
        int shared, master;
        size_t sz;

        subctxt_cnt = rcd->subctxt_cnt;
        if (!subctxt_cnt) {
                shared = 0;
                master = 0;
                subctxt_cnt = 1;
        } else {
                shared = 1;
                master = !subctxt_fp(fp);
        }

        sz = sizeof(*kinfo);
        /* If context sharing is not requested, allow the old size structure */
        if (!shared)
                sz -= 7 * sizeof(u64);
        if (ubase_size < sz) {
                ret = -EINVAL;
                goto bail;
        }

        kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
        if (kinfo == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        ret = dd->f_get_base_info(rcd, kinfo);
        if (ret < 0)
                goto bail;

        kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
        kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
        kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
        kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
        /*
         * have to mmap whole thing
         */
        kinfo->spi_rcv_egrbuftotlen =
                rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
        kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
        kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
                rcd->rcvegrbuf_chunks;
        kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
        if (master)
                kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
        /*
         * for this use, may be cfgctxts summed over all chips that
         * are configured and present
         */
        kinfo->spi_nctxts = dd->cfgctxts;
        /* unit (chip/board) our context is on */
        kinfo->spi_unit = dd->unit;
        kinfo->spi_port = ppd->port;
        /* for now, only a single page */
        kinfo->spi_tid_maxsize = PAGE_SIZE;

        /*
         * Doing this per context, and based on the skip value, etc. This has
         * to be the actual buffer size, since the protocol code treats it
         * as an array.
         *
         * These have to be set to user addresses in the user code via mmap.
         * These values are used on return to user code for the mmap target
         * addresses only. For 32 bit, same 44 bit address problem, so use
         * the physical address, not virtual. Before 2.6.11, using the
         * page_address() macro worked, but in 2.6.11, even that returns the
         * full 64 bit address (upper bits all 1's). So far, using the
         * physical addresses (or chip offsets, for chip mapping) works, but
         * no doubt some future kernel release will change that, and we'll be
         * on to yet another method of dealing with this.
         *
         * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
         * since the chips with non-zero rhf_offset don't normally
         * enable tail register updates to host memory, but for testing,
         * both can be enabled and used.
         */
        kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
        kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
        kinfo->spi_rhf_offset = dd->rhf_offset;
        kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
        kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
        /* setup per-unit (not port) status area for user programs */
        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
                (char *) ppd->statusp -
                (char *) dd->pioavailregs_dma;
        kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
        if (!shared) {
                kinfo->spi_piocnt = rcd->piocnt;
                kinfo->spi_piobufbase = (u64) rcd->piobufs;
                kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
        } else if (master) {
                kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
                        (rcd->piocnt % subctxt_cnt);
                /* Master's PIO buffers are after all the slaves' */
                kinfo->spi_piobufbase = (u64) rcd->piobufs +
                        dd->palign *
                        (rcd->piocnt - kinfo->spi_piocnt);
        } else {
                unsigned slave = subctxt_fp(fp) - 1;

                kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
                kinfo->spi_piobufbase = (u64) rcd->piobufs +
                        dd->palign * kinfo->spi_piocnt * slave;
        }

        if (shared) {
                kinfo->spi_sendbuf_status =
                        cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
                /* only spi_subctxt_* fields should be set in this block! */
                kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
                kinfo->spi_subctxt_rcvegrbuf =
                        cvt_kvaddr(rcd->subctxt_rcvegrbuf);
                kinfo->spi_subctxt_rcvhdr_base =
                        cvt_kvaddr(rcd->subctxt_rcvhdr_base);
        }

        /*
         * All user buffers are 2KB buffers. If we ever support
         * giving 4KB buffers to user processes, this will need some
         * work. Can't use piobufbase directly, because it has
         * both 2K and 4K buffer base values.
         */
        kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
                dd->palign;
        kinfo->spi_pioalign = dd->palign;
        kinfo->spi_qpair = QIB_KD_QP;
        /*
         * user mode PIO buffers are always 2KB, even when 4KB can
         * be received, and sent via the kernel; this is ibmaxlen
         * for 2K MTU.
         */
        kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
        kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
        kinfo->spi_ctxt = rcd->ctxt;
        kinfo->spi_subctxt = subctxt_fp(fp);
        kinfo->spi_sw_version = QIB_KERN_SWVERSION;
        kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
        kinfo->spi_hw_version = dd->revision;

        if (master)
                kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;

        sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
        if (copy_to_user(ubase, kinfo, sz))
                ret = -EFAULT;
bail:
        kfree(kinfo);
        return ret;
}
/**
 * qib_tid_update - update a context TID
 * @rcd: the context
 * @fp: the qib device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller. To reduce search time, we
 * keep a cursor for each context, walking the shadow tid array to find
 * one that's not in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                          const struct qib_tid_info *ti)
{
        int ret = 0, ntids;
        u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
        u16 *tidlist;
        struct qib_devdata *dd = rcd->dd;
        u64 physaddr;
        unsigned long vaddr;
        u64 __iomem *tidbase;
        unsigned long tidmap[8];
        struct page **pagep = NULL;
        unsigned subctxt = subctxt_fp(fp);

        if (!dd->pageshadow) {
                ret = -ENOMEM;
                goto done;
        }

        cnt = ti->tidcnt;
        if (!cnt) {
                ret = -EFAULT;
                goto done;
        }
        ctxttid = rcd->ctxt * dd->rcvtidcnt;
        if (!rcd->subctxt_cnt) {
                tidcnt = dd->rcvtidcnt;
                tid = rcd->tidcursor;
                tidoff = 0;
        } else if (!subctxt) {
                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
                         (dd->rcvtidcnt % rcd->subctxt_cnt);
                tidoff = dd->rcvtidcnt - tidcnt;
                ctxttid += tidoff;
                tid = tidcursor_fp(fp);
        } else {
                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
                tidoff = tidcnt * (subctxt - 1);
                ctxttid += tidoff;
                tid = tidcursor_fp(fp);
        }
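        /*
         * Worked example of the split above (numbers illustrative, not
         * from any particular chip): with rcvtidcnt = 512 and
         * subctxt_cnt = 3, each slave gets 512 / 3 = 170 TIDs (slave 1
         * at tidoff 0, slave 2 at tidoff 170), while the master gets
         * 170 + (512 % 3) = 172 TIDs at tidoff 512 - 172 = 340, i.e.
         * the leftover TIDs land in the master's block at the end.
         */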
        if (cnt > tidcnt) {
                /* make sure it all fits in tid_pg_list */
                qib_devinfo(dd->pcidev, "Process tried to allocate %u "
                            "TIDs, only trying max (%u)\n", cnt, tidcnt);
                cnt = tidcnt;
        }
        pagep = (struct page **) rcd->tid_pg_list;
        tidlist = (u16 *) &pagep[dd->rcvtidcnt];
        pagep += tidoff;
        tidlist += tidoff;

        memset(tidmap, 0, sizeof(tidmap));
        /* before decrement; chip actual # */
        ntids = tidcnt;
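        /*
         * ntids is shared across all iterations of the fill loop below,
         * so the inner cursor scan makes at most one full pass over this
         * context's TID slots in total, no matter how the free slots
         * are distributed.
         */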
        tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
                                   dd->rcvtidbase +
                                   ctxttid * sizeof(*tidbase));

        /* virtual address of first page in transfer */
        vaddr = ti->tidvaddr;
        if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
                       cnt * PAGE_SIZE)) {
                ret = -EFAULT;
                goto done;
        }
        ret = qib_get_user_pages(vaddr, cnt, pagep);
        if (ret) {
                /*
                 * if (ret == -EBUSY)
                 * We can't continue because the pagep array won't be
                 * initialized. This should never happen,
                 * unless perhaps the user has mpin'ed the pages
                 * themselves.
                 */
                qib_devinfo(dd->pcidev,
                            "Failed to lock addr %p, %u pages: "
                            "errno %d\n", (void *) vaddr, cnt, -ret);
                goto done;
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
                for (; ntids--; tid++) {
                        if (tid == tidcnt)
                                tid = 0;
                        if (!dd->pageshadow[ctxttid + tid])
                                break;
                }
                if (ntids < 0) {
                        /*
                         * Oops, wrapped all the way through their TIDs,
                         * and didn't have enough free; see comments at
                         * start of routine
                         */
                        i--; /* last tidlist[i] not filled in */
                        ret = -ENOMEM;
                        break;
                }
                tidlist[i] = tid + tidoff;
                /* we "know" system pages and TID pages are same size */
                dd->pageshadow[ctxttid + tid] = pagep[i];
                dd->physshadow[ctxttid + tid] =
                        qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
                /*
                 * don't need atomic or it's overhead
                 */
                __set_bit(tid, tidmap);
                physaddr = dd->physshadow[ctxttid + tid];
                /* PERFORMANCE: below should almost certainly be cached */
                dd->f_put_tid(dd, &tidbase[tid],
                              RCVHQ_RCV_TYPE_EXPECTED, physaddr);
                /*
                 * don't check this tid in qib_ctxtshadow, since we
                 * just filled it in; start with the next one.
                 */
                tid++;
        }
        if (ret) {
                u32 limit;
cleanup:
                /* jump here if copy out of updated info failed... */
                /* same code that's in qib_free_tid() */
                limit = sizeof(tidmap) * BITS_PER_BYTE;
                if (limit > tidcnt)
                        /* just in case size changes in future */
                        limit = tidcnt;
                tid = find_first_bit((const unsigned long *)tidmap, limit);
                for (; tid < limit; tid++) {
                        if (!test_bit(tid, tidmap))
                                continue;
                        if (dd->pageshadow[ctxttid + tid]) {
                                dma_addr_t phys;

                                phys = dd->physshadow[ctxttid + tid];
                                dd->physshadow[ctxttid + tid] = dd->tidinvalid;
                                /* PERFORMANCE: below should almost certainly
                                 * be cached
                                 */
                                dd->f_put_tid(dd, &tidbase[tid],
                                              RCVHQ_RCV_TYPE_EXPECTED,
                                              dd->tidinvalid);
                                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                                dd->pageshadow[ctxttid + tid] = NULL;
                        }
                }
                qib_release_user_pages(pagep, cnt);
        } else {
                /*
                 * Copy the updated array, with qib_tid's filled in, back
                 * to user. Since we did the copy in already, this "should
                 * never fail." If it does, we have to clean up...
                 */
                if (copy_to_user((void __user *)
                                 (unsigned long) ti->tidlist,
                                 tidlist, cnt * sizeof(*tidlist))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
                                 tidmap, sizeof(tidmap))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
                if (tid == tidcnt)
                        tid = 0;
                if (!rcd->subctxt_cnt)
                        rcd->tidcursor = tid;
                else
                        tidcursor_fp(fp) = tid;
        }

done:
        return ret;
}
/**
 * qib_tid_free - free a context TID
 * @rcd: the context
 * @subctxt: the subcontext
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance. We check that the TID is in range for this context
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted. We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
                        const struct qib_tid_info *ti)
{
        int ret = 0;
        u32 tid, ctxttid, cnt, limit, tidcnt;
        struct qib_devdata *dd = rcd->dd;
        u64 __iomem *tidbase;
        unsigned long tidmap[8];

        if (!dd->pageshadow) {
                ret = -ENOMEM;
                goto done;
        }

        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
                           sizeof(tidmap))) {
                ret = -EFAULT;
                goto done;
        }
        ctxttid = rcd->ctxt * dd->rcvtidcnt;
        if (!rcd->subctxt_cnt)
                tidcnt = dd->rcvtidcnt;
        else if (!subctxt) {
                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
                         (dd->rcvtidcnt % rcd->subctxt_cnt);
                ctxttid += dd->rcvtidcnt - tidcnt;
        } else {
                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
                ctxttid += tidcnt * (subctxt - 1);
        }
        tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
                                   dd->rcvtidbase +
                                   ctxttid * sizeof(*tidbase));

        limit = sizeof(tidmap) * BITS_PER_BYTE;
        if (limit > tidcnt)
                /* just in case size changes in future */
                limit = tidcnt;
        tid = find_first_bit(tidmap, limit);
        for (cnt = 0; tid < limit; tid++) {
                /*
                 * small optimization; if we detect a run of 3 or so without
                 * any set, use find_first_bit again. That's mainly to
                 * accelerate the case where we wrapped, so we have some at
                 * the beginning, and some at the end, and a big gap
                 * in the middle.
                 */
                if (!test_bit(tid, tidmap))
                        continue;
                cnt++;
                if (dd->pageshadow[ctxttid + tid]) {
                        struct page *p;
                        dma_addr_t phys;

                        p = dd->pageshadow[ctxttid + tid];
                        dd->pageshadow[ctxttid + tid] = NULL;
                        phys = dd->physshadow[ctxttid + tid];
                        dd->physshadow[ctxttid + tid] = dd->tidinvalid;
                        /* PERFORMANCE: below should almost certainly be
                         * cached
                         */
                        dd->f_put_tid(dd, &tidbase[tid],
                                      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
                        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                        qib_release_user_pages(&p, 1);
                }
        }
done:
        return ret;
}

/**
 * qib_set_part_key - set a partition key
 * @rcd: the context
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed). This is somewhat tricky, since multiple contexts may set
 * the same key, so we reference count them, and clean up at exit. All 4
 * partition keys are packed into a single qlogic_ib register. It's an
 * error for a process to set the same pkey multiple times. We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that. I've used the atomic operations, and no locking, and only make
 * a single pass through what's available. This should be more than
 * adequate for some time. I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
        struct qib_pportdata *ppd = rcd->ppd;
        int i, any = 0, pidx = -1;
        u16 lkey = key & 0x7FFF;
        int ret;

        if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
                /* nothing to do; this key always valid */
                ret = 0;
                goto bail;
        }

        if (!lkey) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Set the full membership bit, because it has to be
         * set in the register or the packet, and it seems
         * cleaner to set in the register than to force all
         * callers to set it.
         */
        key |= 0x8000;
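        /*
         * PKey format refresher (InfiniBand): bit 15 is the membership
         * bit (1 = full member, 0 = limited member) and bits 14:0 are
         * the key proper, so e.g. 0x8001 and 0x0001 are the full and
         * limited variants of the same key. lkey above is the
         * membership-independent part used for the duplicate checks
         * below.
         */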
        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                if (!rcd->pkeys[i] && pidx == -1)
                        pidx = i;
                if (rcd->pkeys[i] == key) {
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (pidx == -1) {
                ret = -EBUSY;
                goto bail;
        }
        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i]) {
                        any++;
                        continue;
                }
                if (ppd->pkeys[i] == key) {
                        atomic_t *pkrefs = &ppd->pkeyrefs[i];

                        if (atomic_inc_return(pkrefs) > 1) {
                                rcd->pkeys[pidx] = key;
                                ret = 0;
                                goto bail;
                        } else {
                                /*
                                 * lost race, decrement count, catch below
                                 */
                                atomic_dec(pkrefs);
                                any++;
                        }
                }
                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
                        /*
                         * It makes no sense to have both the limited and
                         * full membership PKEY set at the same time since
                         * the unlimited one will disable the limited one.
                         */
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ret = -EBUSY;
                goto bail;
        }
        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i] &&
                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
                        rcd->pkeys[pidx] = key;
                        ppd->pkeys[i] = key;
                        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
                        ret = 0;
                        goto bail;
                }
        }
        ret = -EBUSY;

bail:
        return ret;
}
/**
 * qib_manage_rcvq - manage a context's receive queue
 * @rcd: the context
 * @subctxt: the subcontext
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
                           int start_stop)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned int rcvctrl_op;

        if (subctxt)
                goto bail;
        /* atomically clear receive enable ctxt. */
        if (start_stop) {
                /*
                 * On enable, force in-memory copy of the tail register to
                 * 0, so that protocol code doesn't have to worry about
                 * whether or not the chip has yet updated the in-memory
                 * copy or not on return from the system call. The chip
                 * always resets its tail register back to 0 on a
                 * transition from disabled to enabled.
                 */
                if (rcd->rcvhdrtail_kvaddr)
                        qib_clear_rcvhdrtail(rcd);
                rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
        } else
                rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
        dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
        /* always; new head should be equal to new tail; see above */
bail:
        return 0;
}
static void qib_clean_part_key(struct qib_ctxtdata *rcd,
                               struct qib_devdata *dd)
{
        int i, j, pchanged = 0;
        u64 oldpkey;
        struct qib_pportdata *ppd = rcd->ppd;

        /* for debugging only */
        oldpkey = (u64) ppd->pkeys[0] |
                ((u64) ppd->pkeys[1] << 16) |
                ((u64) ppd->pkeys[2] << 32) |
                ((u64) ppd->pkeys[3] << 48);

        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                if (!rcd->pkeys[i])
                        continue;
                for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
                        /* check for match independent of the global bit */
                        if ((ppd->pkeys[j] & 0x7fff) !=
                            (rcd->pkeys[i] & 0x7fff))
                                continue;
                        if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
                                ppd->pkeys[j] = 0;
                                pchanged++;
                        }
                        break;
                }
                rcd->pkeys[i] = 0;
        }
        if (pchanged)
                (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
}

/* common code for the mappings on dma_alloc_coherent mem */
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
                        unsigned len, void *kvaddr, u32 write_ok, char *what)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned long pfn;
        int ret;

        if ((vma->vm_end - vma->vm_start) > len) {
                qib_devinfo(dd->pcidev,
                            "FAIL on %s: len %lx > %x\n", what,
                            vma->vm_end - vma->vm_start, len);
                ret = -EFAULT;
                goto bail;
        }

        /*
         * shared context user code requires rcvhdrq mapped r/w, others
         * only allowed readonly mapping.
         */
        if (!write_ok) {
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
                                    "%s must be mapped readonly\n", what);
                        ret = -EPERM;
                        goto bail;
                }

                /* don't allow them to later change with mprotect */
                vma->vm_flags &= ~VM_MAYWRITE;
        }

        pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
        ret = remap_pfn_range(vma, vma->vm_start, pfn,
                              len, vma->vm_page_prot);
        if (ret)
                qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
                            "bytes failed: %d\n", what, rcd->ctxt,
                            pfn, len, ret);
bail:
        return ret;
}
static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
                     u64 ureg)
{
        unsigned long phys;
        unsigned long sz;
        int ret;

        /*
         * This is real hardware, so use io_remap. This is the mechanism
         * for the user process to update the head registers for their ctxt
         * in the chip.
         */
        sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
        if ((vma->vm_end - vma->vm_start) > sz) {
                qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
                            "%lx > PAGE\n", vma->vm_end - vma->vm_start);
                ret = -EFAULT;
        } else {
                phys = dd->physaddr + ureg;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         phys >> PAGE_SHIFT,
                                         vma->vm_end - vma->vm_start,
                                         vma->vm_page_prot);
        }
        return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
                        struct qib_devdata *dd,
                        struct qib_ctxtdata *rcd,
                        unsigned piobufs, unsigned piocnt)
{
        unsigned long phys;
        int ret;

        /*
         * When we map the PIO buffers in the chip, we want to map them as
         * writeonly, no read possible; unfortunately, x86 doesn't allow
         * for this in hardware, but we still prevent users from asking
         * for it.
         */
        if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
                qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
                            "reqlen %lx > PAGE\n",
                            vma->vm_end - vma->vm_start);
                ret = -EINVAL;
                goto bail;
        }

        phys = dd->physaddr + piobufs;
#if defined(__powerpc__)
        /* There isn't a generic way to specify writethrough mappings */
        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
        pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
        pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

        /*
         * don't allow them to later change to readable with mprotect (for when
         * not initially mapped readable, as is normally the case)
         */
        vma->vm_flags &= ~VM_MAYREAD;
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

        if (qib_wc_pat)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                                 vma->vm_end - vma->vm_start,
                                 vma->vm_page_prot);
bail:
        return ret;
}
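
/*
 * Added note: mapping the PIO buffers write-combined matters for
 * throughput, since it lets the CPU coalesce the individual stores of
 * a packet copy into larger bus bursts toward the chip instead of one
 * PCIe write per store. The qib_wc_pat check above selects PAT-based
 * write-combining (as opposed to the MTRR-based setup the driver can
 * do elsewhere); the exact behavior is architecture-dependent.
 */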
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                           struct qib_ctxtdata *rcd)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned long start, size;
        size_t total_size, i;
        unsigned long pfn;
        int ret;

        size = rcd->rcvegrbuf_size;
        total_size = rcd->rcvegrbuf_chunks * size;
        if ((vma->vm_end - vma->vm_start) > total_size) {
                qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
                            "reqlen %lx > actual %lx\n",
                            vma->vm_end - vma->vm_start,
                            (unsigned long) total_size);
                ret = -EINVAL;
                goto bail;
        }

        if (vma->vm_flags & VM_WRITE) {
                qib_devinfo(dd->pcidev, "Can't map eager buffers as "
                            "writable (flags=%lx)\n", vma->vm_flags);
                ret = -EPERM;
                goto bail;
        }
        /* don't allow them to later change to writeable with mprotect */
        vma->vm_flags &= ~VM_MAYWRITE;

        start = vma->vm_start;

        for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
                pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
                ret = remap_pfn_range(vma, start, pfn, size,
                                      vma->vm_page_prot);
                if (ret < 0)
                        goto bail;
        }
        ret = 0;

bail:
        return ret;
}

/*
 * qib_file_vma_fault - handle a VMA page fault.
 */
static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page;

        page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;

        return 0;
}

static struct vm_operations_struct qib_file_vm_ops = {
        .fault = qib_file_vma_fault,
};
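
/*
 * How the fault path above works: mmap_kvaddr() below stores the
 * *kernel virtual* address of the shared vmalloc region in vm_pgoff
 * (shifted down by PAGE_SHIFT). On a fault, vmf->pgoff is that base
 * plus the page offset into the VMA, so shifting it back up yields
 * the kernel virtual address of the faulting page, which
 * vmalloc_to_page() converts into the struct page to insert.
 */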
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                       struct qib_ctxtdata *rcd, unsigned subctxt)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned subctxt_cnt;
        unsigned long len;
        void *addr;
        size_t size;
        int ret = 0;

        subctxt_cnt = rcd->subctxt_cnt;
        size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;

        /*
         * Each process has all the subctxt uregbase, rcvhdrq, and
         * rcvegrbufs mmapped - as an array for all the processes,
         * and also separately for this process.
         */
        if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
                addr = rcd->subctxt_uregbase;
                size = PAGE_SIZE * subctxt_cnt;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
                addr = rcd->subctxt_rcvhdr_base;
                size = rcd->rcvhdrq_size * subctxt_cnt;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
                addr = rcd->subctxt_rcvegrbuf;
                size *= subctxt_cnt;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
                                        PAGE_SIZE * subctxt)) {
                addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
                size = PAGE_SIZE;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
                                        rcd->rcvhdrq_size * subctxt)) {
                addr = rcd->subctxt_rcvhdr_base +
                        rcd->rcvhdrq_size * subctxt;
                size = rcd->rcvhdrq_size;
        } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
                addr = rcd->user_event_mask;
                size = PAGE_SIZE;
        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
                                        size * subctxt)) {
                addr = rcd->subctxt_rcvegrbuf + size * subctxt;
                /* rcvegrbufs are read-only on the slave */
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
                                    "Can't map eager buffers as "
                                    "writable (flags=%lx)\n", vma->vm_flags);
                        ret = -EPERM;
                        goto bail;
                }
                /*
                 * Don't allow permission to later change to writeable
                 * with mprotect.
                 */
                vma->vm_flags &= ~VM_MAYWRITE;
        } else
                goto bail;
        len = vma->vm_end - vma->vm_start;
        if (len > size) {
                ret = -EINVAL;
                goto bail;
        }

        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
        vma->vm_ops = &qib_file_vm_ops;
        vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
        ret = 1;

bail:
        return ret;
}

/**
 * qib_mmapf - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
 * buffers in the chip. We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{
        struct qib_ctxtdata *rcd;
        struct qib_devdata *dd;
        u64 pgaddr, ureg;
        unsigned piobufs, piocnt;
        int ret, match = 1;

        rcd = ctxt_fp(fp);
        if (!rcd || !(vma->vm_flags & VM_SHARED)) {
                ret = -EINVAL;
                goto bail;
        }
        dd = rcd->dd;

        /*
         * This is the qib_do_user_init() code, mapping the shared buffers
         * and per-context user registers into the user process. The address
         * referred to by vm_pgoff is the file offset passed via mmap().
         * For shared contexts, this is the kernel vmalloc() address of the
         * pages to share with the master.
         * For non-shared or master ctxts, this is a physical address.
         * We only do one mmap for each space mapped.
         */
        pgaddr = vma->vm_pgoff << PAGE_SHIFT;
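        /*
         * Illustration (userspace side, hypothetical code): each region
         * is mapped by handing one of the spi_* cookies from
         * qib_get_base_info() back as the mmap() offset, e.g.
         *
         *      hdrq = mmap(NULL, hdrq_len, PROT_READ | PROT_WRITE,
         *                  MAP_SHARED, fd, (off_t) binfo.spi_rcvhdr_base);
         *
         * which is why the dispatch below is a series of pgaddr
         * comparisons rather than a range lookup.
         */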
        /*
         * Check for 0 in case one of the allocations failed, but user
         * called mmap anyway.
         */
        if (!pgaddr) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Physical addresses must fit in 40 bits for our hardware.
         * Check for kernel virtual addresses first, anything else must
         * match a HW or memory address.
         */
        ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
        if (ret) {
                if (ret > 0)
                        ret = 0;
                goto bail;
        }

        ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
        if (!rcd->subctxt_cnt) {
                /* ctxt is not shared */
                piocnt = rcd->piocnt;
                piobufs = rcd->piobufs;
        } else if (!subctxt_fp(fp)) {
                /* caller is the master */
                piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
                         (rcd->piocnt % rcd->subctxt_cnt);
                piobufs = rcd->piobufs +
                        dd->palign * (rcd->piocnt - piocnt);
        } else {
                unsigned slave = subctxt_fp(fp) - 1;

                /* caller is a slave */
                piocnt = rcd->piocnt / rcd->subctxt_cnt;
                piobufs = rcd->piobufs + dd->palign * piocnt * slave;
        }

        if (pgaddr == ureg)
                ret = mmap_ureg(vma, dd, ureg);
        else if (pgaddr == piobufs)
                ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
        else if (pgaddr == dd->pioavailregs_phys)
                /* in-memory copy of pioavail registers */
                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
                                   (void *) dd->pioavailregs_dma, 0,
                                   "pioavail registers");
        else if (pgaddr == rcd->rcvegr_phys)
                ret = mmap_rcvegrbufs(vma, rcd);
        else if (pgaddr == (u64) rcd->rcvhdrq_phys)
                /*
                 * The rcvhdrq itself; multiple pages, contiguous
                 * from an i/o perspective. Shared contexts need
                 * to map r/w, so we allow writing.
                 */
                ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
                                   rcd->rcvhdrq, 1, "rcvhdrq");
        else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
                /* in-memory copy of rcvhdrq tail register */
                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
                                   rcd->rcvhdrtail_kvaddr, 0,
                                   "rcvhdrq tail");
        else
                match = 0;
        if (!match)
                ret = -EINVAL;

        vma->vm_private_data = NULL;

        if (ret < 0)
                qib_devinfo(dd->pcidev,
                            "mmap Failure %d: off %llx len %lx\n",
                            -ret, (unsigned long long)pgaddr,
                            vma->vm_end - vma->vm_start);
bail:
        return ret;
}

static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
                                    struct file *fp,
                                    struct poll_table_struct *pt)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned pollflag;

        poll_wait(fp, &rcd->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (rcd->urgent != rcd->urgent_poll) {
                pollflag = POLLIN | POLLRDNORM;
                rcd->urgent_poll = rcd->urgent;
        } else {
                pollflag = 0;
                set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
        }
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}

static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
                                  struct file *fp,
                                  struct poll_table_struct *pt)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned pollflag;

        poll_wait(fp, &rcd->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (dd->f_hdrqempty(rcd)) {
                set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
                pollflag = 0;
        } else
                pollflag = POLLIN | POLLRDNORM;
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}

static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
{
        struct qib_ctxtdata *rcd;
        unsigned pollflag;

        rcd = ctxt_fp(fp);
        if (!rcd)
                pollflag = POLLERR;
        else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
                pollflag = qib_poll_urgent(rcd, fp, pt);
        else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
                pollflag = qib_poll_next(rcd, fp, pt);
        else /* invalid */
                pollflag = POLLERR;

        return pollflag;
}

/*
 * Check that userland and driver are compatible for subcontexts.
 */
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{
        /* this code is written long-hand for clarity */
        if (QIB_USER_SWMAJOR != user_swmajor) {
                /* no promise of compatibility if major mismatch */
                return 0;
        }
        if (QIB_USER_SWMAJOR == 1) {
                switch (QIB_USER_SWMINOR) {
                case 0:
                case 1:
                case 2:
                        /* no subctxt implementation so cannot be compatible */
                        return 0;
                case 3:
                        /* 3 is only compatible with itself */
                        return user_swminor == 3;
                default:
                        /* >= 4 are compatible (or are expected to be) */
                        return user_swminor >= 4;
                }
        }
        /* make no promises yet for future major versions */
        return 0;
}
static int init_subctxts(struct qib_devdata *dd,
                         struct qib_ctxtdata *rcd,
                         const struct qib_user_info *uinfo)
{
        int ret = 0;
        unsigned num_subctxts;
        size_t size;

        /*
         * If the user is requesting zero subctxts,
         * skip the subctxt allocation.
         */
        if (uinfo->spu_subctxt_cnt <= 0)
                goto bail;
        num_subctxts = uinfo->spu_subctxt_cnt;

        /* Check for subctxt compatibility */
        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
                                     uinfo->spu_userversion & 0xffff)) {
                qib_devinfo(dd->pcidev,
                            "Mismatched user version (%d.%d) and driver "
                            "version (%d.%d) while context sharing. Ensure "
                            "that driver and library are from the same "
                            "release.\n",
                            (int) (uinfo->spu_userversion >> 16),
                            (int) (uinfo->spu_userversion & 0xffff),
                            QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
                goto bail;
        }
        if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
                ret = -EINVAL;
                goto bail;
        }

        rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
        if (!rcd->subctxt_uregbase) {
                ret = -ENOMEM;
                goto bail;
        }
        /* Note: rcd->rcvhdrq_size isn't initialized yet. */
        size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
                     sizeof(u32), PAGE_SIZE) * num_subctxts;
        rcd->subctxt_rcvhdr_base = vmalloc_user(size);
        if (!rcd->subctxt_rcvhdr_base) {
                ret = -ENOMEM;
                goto bail_ureg;
        }

        rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
                                              rcd->rcvegrbuf_size *
                                              num_subctxts);
        if (!rcd->subctxt_rcvegrbuf) {
                ret = -ENOMEM;
                goto bail_rhdr;
        }

        rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
        rcd->subctxt_id = uinfo->spu_subctxt_id;
        rcd->active_slaves = 1;
        rcd->redirect_seq_cnt = 1;
        set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
        goto bail;

bail_rhdr:
        vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
        vfree(rcd->subctxt_uregbase);
        rcd->subctxt_uregbase = NULL;
bail:
        return ret;
}

static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
                      struct file *fp, const struct qib_user_info *uinfo)
{
        struct qib_devdata *dd = ppd->dd;
        struct qib_ctxtdata *rcd;
        void *ptmp = NULL;
        int ret;

        rcd = qib_create_ctxtdata(ppd, ctxt);

        /*
         * Allocate memory for use in qib_tid_update() at open to
         * reduce cost of expected send setup per message segment
         */
        if (rcd)
                ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
                               dd->rcvtidcnt * sizeof(struct page **),
                               GFP_KERNEL);
        if (!rcd || !ptmp) {
                qib_dev_err(dd, "Unable to allocate ctxtdata "
                            "memory, failing open\n");
                ret = -ENOMEM;
                goto bailerr;
        }
        rcd->userversion = uinfo->spu_userversion;
        ret = init_subctxts(dd, rcd, uinfo);
        if (ret)
                goto bailerr;
        rcd->tid_pg_list = ptmp;
        rcd->pid = current->pid;
        init_waitqueue_head(&dd->rcd[ctxt]->wait);
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
        ret = 0;
        goto bail;

bailerr:
        dd->rcd[ctxt] = NULL;
        kfree(rcd);
        kfree(ptmp);
bail:
        return ret;
}

static inline int usable(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;

        return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
                (ppd->lflags & QIBL_LINKACTIVE);
}
/*
 * Select a context on the given device, either using a requested port
 * or the port based on the context number.
 */
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
                            const struct qib_user_info *uinfo)
{
        struct qib_pportdata *ppd = NULL;
        int ret, ctxt;

        if (port) {
                if (!usable(dd->pport + port - 1)) {
                        ret = -ENETDOWN;
                        goto done;
                } else
                        ppd = dd->pport + port - 1;
        }
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
             ctxt++)
                ;
        if (ctxt == dd->cfgctxts) {
                ret = -EBUSY;
                goto done;
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;

                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
                        for (pidx = 0; pidx < dd->num_pports && !ppd;
                             pidx++)
                                if (usable(dd->pport + pidx))
                                        ppd = dd->pport + pidx;
                }
        }
        ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
        return ret;
}

static int find_free_ctxt(int unit, struct file *fp,
                          const struct qib_user_info *uinfo)
{
        struct qib_devdata *dd = qib_lookup(unit);
        int ret;

        if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
                ret = -ENODEV;
        else
                ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);

        return ret;
}

static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
                      unsigned alg)
{
        struct qib_devdata *udd = NULL;
        int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
        u32 port = uinfo->spu_port, ctxt;

        devmax = qib_count_units(&npresent, &nup);
        if (!npresent) {
                ret = -ENXIO;
                goto done;
        }
        if (nup == 0) {
                ret = -ENETDOWN;
                goto done;
        }

        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;

                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;

                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
                            usable(dd->pport + port - 1))
                                pusable = 1;
                        else
                                for (i = 0; i < dd->num_pports; i++)
                                        if (usable(dd->pport + i))
                                                pusable++;
                        if (!pusable)
                                continue;
                        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
                             ctxt++)
                                if (dd->rcd[ctxt])
                                        cused++;
                                else
                                        cfree++;
                        if (pusable && cfree && cused < inuse) {
                                udd = dd;
                                inuse = cused;
                        }
                }
                if (udd) {
                        ret = choose_port_ctxt(fp, udd, port, uinfo);
                        goto done;
                }
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);

                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
                                        goto done;
                                if (ret == -EBUSY)
                                        dusable++;
                        }
                }
        }
        ret = dusable ? -EBUSY : -ENETDOWN;
done:
        return ret;
}

static int find_shared_ctxt(struct file *fp,
                            const struct qib_user_info *uinfo)
{
        int devmax, ndev, i;
        int ret = 0;

        devmax = qib_count_units(NULL, NULL);

        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);

                /* device portion of usable() */
                if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
                        continue;
                for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
                        struct qib_ctxtdata *rcd = dd->rcd[i];

                        /* Skip ctxts which are not yet open */
                        if (!rcd || !rcd->cnt)
                                continue;
                        /* Skip ctxt if it doesn't match the requested one */
                        if (rcd->subctxt_id != uinfo->spu_subctxt_id)
                                continue;
                        /* Verify the sharing process matches the master */
                        if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
                            rcd->userversion != uinfo->spu_userversion ||
                            rcd->cnt >= rcd->subctxt_cnt) {
                                ret = -EINVAL;
                                goto done;
                        }
                        ctxt_fp(fp) = rcd;
                        subctxt_fp(fp) = rcd->cnt++;
                        rcd->subpid[subctxt_fp(fp)] = current->pid;
                        tidcursor_fp(fp) = 0;
                        rcd->active_slaves |= 1 << subctxt_fp(fp);
                        ret = 1;
                        goto done;
                }
        }
done:
        return ret;
}

static int qib_open(struct inode *in, struct file *fp)
{
        /* The real work is performed later in qib_assign_ctxt() */
        fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
        if (fp->private_data) /* no cpu affinity by default */
                ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
        return fp->private_data ? 0 : -ENOMEM;
}
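
/*
 * open() deliberately does almost nothing: at this point we don't yet
 * know which unit, port, or shared-context group the caller wants, so
 * we only allocate the per-file struct qib_filedata. The actual
 * context binding happens when userspace issues its first command
 * through write() and we land in qib_assign_ctxt() below.
 */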
/*
 * Get ctxt early, so we can set affinity prior to memory allocation.
 */
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;

	/* Check to be sure we haven't already initialized this file */
	if (ctxt_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != QIB_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
		alg = uinfo->spu_port_alg;

	mutex_lock(&qib_mutex);

	if (qib_compatible_subctxts(swmajor, swminor) &&
	    uinfo->spu_subctxt_cnt) {
		ret = find_shared_ctxt(fp, uinfo);
		if (ret) {
			if (ret > 0)
				ret = 0;
			goto done_chk_sdma;
		}
	}

	i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
	if (i_minor)
		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
	else
		ret = get_a_ctxt(fp, uinfo, alg);

done_chk_sdma:
	if (!ret) {
		struct qib_filedata *fd = fp->private_data;
		const struct qib_ctxtdata *rcd = fd->rcd;
		const struct qib_devdata *dd = rcd->dd;

		if (dd->flags & QIB_HAS_SEND_DMA) {
			fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
							    dd->unit,
							    rcd->ctxt,
							    fd->subctxt);
			if (!fd->pq)
				ret = -ENOMEM;
		}

		/*
		 * If the process has NOT already set its affinity, select
		 * and reserve a processor for it, as a rendezvous for all
		 * users of the driver.  If they don't actually later
		 * set affinity to this cpu, or set it to some other cpu,
		 * it just means that sooner or later we don't recommend
		 * a cpu, and let the scheduler do its best.
		 */
		if (!ret && cpus_weight(current->cpus_allowed) >=
		    qib_cpulist_count) {
			int cpu;

			cpu = find_first_zero_bit(qib_cpulist,
						  qib_cpulist_count);
			if (cpu != qib_cpulist_count) {
				__set_bit(cpu, qib_cpulist);
				fd->rec_cpu_num = cpu;
			}
		} else if (cpus_weight(current->cpus_allowed) == 1 &&
			   test_bit(first_cpu(current->cpus_allowed),
				    qib_cpulist))
			qib_devinfo(dd->pcidev, "%s PID %u affinity "
				    "set to cpu %d; already allocated\n",
				    current->comm, current->pid,
				    first_cpu(current->cpus_allowed));
	}

	mutex_unlock(&qib_mutex);

done:
	return ret;
}

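/*
 * Second stage of context setup, run for the master context when
 * userspace issues QIB_CMD_USER_INIT: carve out this context's share of
 * the 2KB PIO buffers, allocate the receive header queue and eager
 * buffers, enable receive, and finally wake any slave subcontexts that
 * were blocked in the wait at the top of this function.
 */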
static int qib_do_user_init(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int ret;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_devdata *dd;
	unsigned uctxt;

	/* Subctxts don't need to initialize anything since master did it. */
	if (subctxt_fp(fp)) {
		ret = wait_event_interruptible(rcd->wait,
			!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
		goto bail;
	}

	dd = rcd->dd;

	/* some ctxts may get extra buffers, calculate that here */
	uctxt = rcd->ctxt - dd->first_user_ctxt;
	if (uctxt < dd->ctxts_extrabuf) {
		rcd->piocnt = dd->pbufsctxt + 1;
		rcd->pio_base = rcd->piocnt * uctxt;
	} else {
		rcd->piocnt = dd->pbufsctxt;
		rcd->pio_base = rcd->piocnt * uctxt +
			dd->ctxts_extrabuf;
	}

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.  Can't use piobufbase directly, because it has
	 * both 2K and 4K buffer base values.  So check and handle.
	 */
	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
		if (rcd->pio_base >= dd->piobcnt2k) {
			qib_dev_err(dd,
				    "%u:ctxt%u: no 2KB buffers available\n",
				    dd->unit, rcd->ctxt);
			ret = -ENOBUFS;
			goto bail;
		}
		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
			    rcd->ctxt, rcd->piocnt);
	}

	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_USER, rcd);
	/*
	 * Try to ensure that processes start up with a consistent avail
	 * update for their own range, at least.  If the system is very
	 * quiet, the in-memory copy may be out of date at startup for this
	 * range of buffers when a context gets re-used.  Do this after the
	 * chg_pioavail and before the rest of setup, so it's "almost
	 * certain" the DMA will have occurred (can't 100% guarantee, but
	 * should be many decimals of 9s, with this ordering), given how
	 * much else happens after this.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for the time being.  If rcd->ctxt > chip-supported,
	 * we'll need extra logic here to handle it by overflowing
	 * through ctxt 0, someday.
	 */
	ret = qib_create_rcvhdrq(dd, rcd);
	if (!ret)
		ret = qib_setup_eagerbufs(rcd);
	if (ret)
		goto bail_pio;

	rcd->tidcursor = 0; /* start at beginning after open */

	/* initialize poll variables... */
	rcd->urgent = 0;
	rcd->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that DMA the tail register to memory when it changes
	 * (and when the update bit transitions from 0 to 1), we turn it
	 * off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (rcd->rcvhdrtail_kvaddr)
		qib_clear_rcvhdrtail(rcd);

	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
		      rcd->ctxt);

	/* Notify any waiting slaves */
	if (rcd->subctxt_cnt) {
		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
		wake_up(&rcd->wait);
	}
	return 0;

bail_pio:
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_KERN, rcd);
bail:
	return ret;
}

/**
 * unlock_expected_tids - unlock any expected TID entries context still had in use
 * @rcd: ctxt
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using f_clear_tids.
 */
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;

	for (i = ctxt_tidbase; i < maxtid; i++) {
		struct page *p = dd->pageshadow[i];
		dma_addr_t phys;

		if (!p)
			continue;

		phys = dd->physshadow[i];
		dd->physshadow[i] = dd->tidinvalid;
		dd->pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		qib_release_user_pages(&p, 1);
		cnt++;
	}
}

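/*
 * Release a context (or one slave subcontext).  The last closer of a
 * shared context tears down the receive side, disarms and returns the
 * PIO buffers to the kernel, releases any expected TIDs still mapped,
 * and frees the ctxtdata once qib_mutex has been dropped.
 */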
static int qib_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct qib_filedata *fd;
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	unsigned long flags;
	unsigned ctxt;
	pid_t pid;

	mutex_lock(&qib_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	rcd = fd->rcd;
	if (!rcd) {
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	dd = rcd->dd;

	/* ensure all pio buffer writes in progress are flushed */
	qib_flush_wc();

	/* drain user sdma queue */
	if (fd->pq) {
		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
		qib_user_sdma_queue_destroy(fd->pq);
	}

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	if (--rcd->cnt) {
		/*
		 * XXX If the master closes the context before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		rcd->active_slaves &= ~(1 << fd->subctxt);
		rcd->subpid[fd->subctxt] = 0;
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	ctxt = rcd->ctxt;
	dd->rcd[ctxt] = NULL;
	pid = rcd->pid;
	rcd->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (rcd->rcvwait_to || rcd->piowait_to ||
	    rcd->rcvnowait || rcd->pionowait) {
		rcd->rcvwait_to = 0;
		rcd->piowait_to = 0;
		rcd->rcvnowait = 0;
		rcd->pionowait = 0;
	}
	if (rcd->flag)
		rcd->flag = 0;

	if (dd->kregbase) {
		/* atomically clear receive enable ctxt and intr avail. */
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);

		/* clean up the pkeys for this ctxt user */
		qib_clean_part_key(rcd, dd);
		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
		qib_chg_pioavailkernel(dd, rcd->pio_base,
				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);

		dd->f_clear_tids(dd, rcd);

		if (dd->pageshadow)
			unlock_expected_tids(rcd);
		qib_stats.sps_ctxts--;
	}

	mutex_unlock(&qib_mutex);
	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}

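/*
 * Fill in a qib_ctxt_info snapshot (unit/port/ctxt numbers, context
 * counts, and the recommended cpu, if any) and copy it out to the
 * address userspace passed with QIB_CMD_CTXT_INFO.
 */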
static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{
	struct qib_ctxt_info info;
	int ret;
	size_t sz;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_filedata *fd;

	fd = fp->private_data;

	info.num_active = qib_count_active_units();
	info.unit = rcd->dd->unit;
	info.port = rcd->ppd->port;
	info.ctxt = rcd->ctxt;
	info.subctxt = subctxt_fp(fp);
	/* Number of user ctxts available for this device. */
	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
	info.num_subctxts = rcd->subctxt_cnt;
	info.rec_cpu = fd->rec_cpu_num;
	sz = sizeof(info);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

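/*
 * Report the user sdma "inflight" counter to the address userspace
 * passed with QIB_CMD_SDMA_INFLIGHT.
 */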
static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
				 u32 __user *inflightp)
{
	const u32 val = qib_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

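/*
 * Advance the sdma queue if possible, then report the "complete"
 * counter for QIB_CMD_SDMA_COMPLETE; together with the inflight counter
 * this lets userspace tell when its queued sends have finished.
 */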
static int qib_sdma_get_complete(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 u32 __user *completep)
{
	u32 val;
	int err;

	if (!pq)
		return -EINVAL;

	err = qib_user_sdma_make_progress(ppd, pq);
	if (err < 0)
		return err;

	val = qib_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

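/*
 * Called on the disarm paths.  If the port isn't usable, mark the
 * disarm event pending for every subcontext and poll for up to 30
 * seconds for the link to recover, returning -ENETDOWN so the caller
 * knows to retry.
 */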
static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
	int ret = 0;

	if (!usable(rcd->ppd)) {
		int i;
		/*
		 * if link is down, or otherwise not usable, delay
		 * the caller up to 30 seconds, so we don't thrash
		 * in trying to get the chip back to ACTIVE, and
		 * set flag so they make the call again.
		 */
		if (rcd->user_event_mask) {
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
			msleep(100);
		ret = -ENETDOWN;
	}
	return ret;
}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&ppd->dd->uctxt_lock);
	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
	     ctxt++) {
		rcd = ppd->dd->rcd[ctxt];
		if (!rcd)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, &rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(evtbit, &rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&ppd->dd->uctxt_lock);
	return ret;
}

/*
 * Clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions, just atomically clear.
 * The user process then performs actions appropriate to the bit having
 * been set, if desired, and checks again in the future.
 */
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{
	int ret = 0, i;

	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
			(void)qib_disarm_piobufs_ifneeded(rcd);
			ret = disarm_req_delay(rcd);
		} else
			clear_bit(i, &rcd->user_event_mask[subctxt]);
	}
	return ret;
}

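/*
 * write() is the command path for the driver: userspace writes a
 * struct qib_cmd, i.e. the type word followed by the matching member of
 * the cmd union, and on success the number of bytes consumed is
 * returned.  A hypothetical userspace caller (a sketch, not part of
 * this file) might look like:
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_ASSIGN_CTXT };
 *	cmd.cmd.user_info = uinfo;	// uinfo assembled by the caller
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "QIB_CMD_ASSIGN_CTXT");
 */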
static ssize_t qib_write(struct file *fp, const char __user *data,
			 size_t count, loff_t *off)
{
	const struct qib_cmd __user *ucmd;
	struct qib_ctxtdata *rcd;
	const void __user *src;
	size_t consumed, copy = 0;
	struct qib_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct qib_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
	case QIB_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;

	case QIB_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;

	case QIB_CMD_CTXT_INFO:
		copy = sizeof(cmd.cmd.ctxt_info);
		dest = &cmd.cmd.ctxt_info;
		src = &ucmd->cmd.ctxt_info;
		break;

	case QIB_CMD_TID_UPDATE:
	case QIB_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;

	case QIB_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;

	case QIB_CMD_DISARM_BUFS:
	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;

	case QIB_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;

	case QIB_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;

	case QIB_CMD_ACK_EVENT:
		copy = sizeof(cmd.cmd.event_mask);
		dest = &cmd.cmd.event_mask;
		src = &ucmd->cmd.event_mask;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}
		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	rcd = ctxt_fp(fp);
	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;

	case QIB_CMD_USER_INIT:
		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = qib_get_base_info(fp, (void __user *) (unsigned long)
					cmd.cmd.user_info.spu_base_info,
					cmd.cmd.user_info.spu_base_info_size);
		break;

	case QIB_CMD_RECV_CTRL:
		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
		break;

	case QIB_CMD_CTXT_INFO:
		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
				    (unsigned long) cmd.cmd.ctxt_info);
		break;

	case QIB_CMD_TID_UPDATE:
		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
		break;

	case QIB_CMD_TID_FREE:
		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
		break;

	case QIB_CMD_SET_PART_KEY:
		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
		break;

	case QIB_CMD_DISARM_BUFS:
		(void)qib_disarm_piobufs_ifneeded(rcd);
		ret = disarm_req_delay(rcd);
		break;

	case QIB_CMD_PIOAVAILUPD:
		qib_force_pio_avail_update(rcd->dd);
		break;

	case QIB_CMD_POLL_TYPE:
		rcd->poll_type = cmd.cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_inflight);
		break;

	case QIB_CMD_SDMA_COMPLETE:
		ret = qib_sdma_get_complete(rcd->ppd,
					    user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_complete);
		break;

	case QIB_CMD_ACK_EVENT:
		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
					 cmd.cmd.event_mask);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}

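/*
 * Vectored (aio) writes bypass the command protocol entirely: the iovec
 * array is handed to the user sdma layer, which interprets it as packets
 * to queue for send DMA.
 */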
static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long dim, loff_t off)
{
	struct qib_filedata *fp = iocb->ki_filp->private_data;
	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
	struct qib_user_sdma_queue *pq = fp->pq;

	if (!dim || !pq)
		return -EINVAL;

	return qib_user_sdma_writev(rcd, pq, iov, dim);
}

static struct class *qib_class;
static dev_t qib_dev;

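/*
 * Create one char device node: allocate and register the cdev for the
 * given minor, then create the corresponding /dev entry via the driver's
 * class.  On any failure, *cdevp and *devp are both left NULL.
 */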
int qib_cdev_init(int minor, const char *name,
		  const struct file_operations *fops,
		  struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
	struct cdev *cdev;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(qib_class, NULL, dev, NULL, name);
	if (!IS_ERR(device))
		goto done;
	ret = PTR_ERR(device);
	device = NULL;
	printk(KERN_ERR QIB_DRV_NAME ": Could not create "
	       "device for minor %d, %s (err %d)\n",
	       minor, name, -ret);
err_cdev:
	cdev_del(cdev);
	cdev = NULL;
done:
	*cdevp = cdev;
	*devp = device;
	return ret;
}

void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{
	struct device *device = *devp;

	if (device) {
		device_unregister(device);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

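/*
 * Module-wide char device setup: reserve the QIB_NMINORS minor range and
 * create the device class.  The class (and the nodes created below) keep
 * the "ipath" name, apparently for compatibility with userspace written
 * for the older ipath driver.
 */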
int __init qib_dev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	qib_class = class_create(THIS_MODULE, "ipath");
	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
		printk(KERN_ERR QIB_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		unregister_chrdev_region(qib_dev, QIB_NMINORS);
	}

done:
	return ret;
}

void qib_dev_cleanup(void)
{
	if (qib_class) {
		class_destroy(qib_class);
		qib_class = NULL;
	}

	unregister_chrdev_region(qib_dev, QIB_NMINORS);
}

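/*
 * user_count tracks how many units have user device nodes; the first
 * add also creates the wildcard "ipath" node (minor 0), and the last
 * remove tears it down again.
 */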
static atomic_t user_count = ATOMIC_INIT(0);

static void qib_user_remove(struct qib_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int qib_user_add(struct qib_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
				    &wildcard_cdev, &wildcard_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath%d", dd->unit);
	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
			    &dd->user_cdev, &dd->user_device);
	if (ret)
		qib_user_remove(dd);
done:
	return ret;
}

/*
 * Create per-unit files in /dev
 */
int qib_device_create(struct qib_devdata *dd)
{
	int r, ret;

	r = qib_user_add(dd);
	ret = qib_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev.
 * Returns void, since the core kernel reports no errors for this.
 */
void qib_device_remove(struct qib_devdata *dd)
{
	qib_user_remove(dd);
	qib_diag_remove(dd);
}