/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);

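/*
 * Example values (editor's sketch, not part of the original source):
 * drm_order() rounds a size up to a power-of-two exponent, so
 *
 *	drm_order(1)    == 0
 *	drm_order(4096) == 12	(PAGE_SHIFT on 4 KB-page systems)
 *	drm_order(4097) == 13
 */
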
#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.  A userspace usage sketch
 * follows the function body below.
 */
int drm_addmap( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map;
	drm_map_t __user *argp = (void __user *)arg;
	drm_map_list_t *list;

	if ( !(filp->f_mode & 3) ) return -EACCES;	/* Require read/write */

	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EFAULT;
	}
	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		if (drm_core_has_MTRR(dev)) {
			if ( map->type == _DRM_FRAME_BUFFER ||
			     (map->flags & _DRM_WRITE_COMBINING) ) {
				map->mtrr = mtrr_add( map->offset, map->size,
						      MTRR_TYPE_WRCOMB, 1 );
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap( map->offset, map->size,
						   dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, drm_order( map->size ), map->handle );
		if ( !map->handle ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree( map->handle );
				drm_free( map, sizeof(*map), DRM_MEM_MAPS );
				return -EBUSY;
			}
			dev->sigdata.lock =
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;

	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		}
		break;

	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += dev->sg->handle;
		break;

	case _DRM_CONSISTENT:
	{
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we limit the address to 2^32-1 (or less), casting it
		 * down to 32 bits is no problem, but we need to point to
		 * a 64-bit variable first. */
		dma_addr_t bus_addr;

		map->handle = drm_pci_alloc(dev, map->size, map->size,
					    0xffffffffUL, &bus_addr);
		map->offset = (unsigned long)bus_addr;
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		break;
	}
	default:
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if(!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
#ifdef CONFIG_COMPAT
	/* Assign a 32-bit handle for _DRM_SHM mappings */
	/* We do it here so that dev->struct_sem protects the increment */
	if (map->type == _DRM_SHM)
		map->offset = map32_handle += PAGE_SIZE;
#endif
	up(&dev->struct_sem);

	if ( copy_to_user( argp, map, sizeof(*map) ) )
		return -EFAULT;
	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
		return -EFAULT;
	return 0;
}

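/*
 * Userspace sketch for the ioctl handled above (editor's illustration, not
 * part of the original source): a DRM client such as the X server fills a
 * drm_map_t, issues DRM_IOCTL_ADD_MAP on the device fd, and reads the
 * resulting handle back from the same structure.  The fb_base/fb_size names
 * are placeholders:
 *
 *	drm_map_t map = { .offset = fb_base, .size = fb_size,
 *			  .type = _DRM_FRAME_BUFFER, .flags = 0 };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *	use_handle(map.handle);
 */
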
/**
 * Remove a map from the maplist and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap().
 */
int drm_rmmap(struct inode *inode, struct file *filp,
	      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_vma_entry_t *pt, *prev;
	drm_map_t *map;
	drm_map_t request;
	int found_maps = 0;

	if (copy_from_user(&request, (drm_map_t __user *)arg,
			   sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list = &dev->maplist->head;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if(r_list->map &&
		   r_list->map->offset == (unsigned long) request.handle &&
		   r_list->map->flags & _DRM_REMOVABLE) break;
	}
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if(list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	list_del(list);
	drm_free(list, sizeof(*list), DRM_MEM_MAPS);

	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma->vm_private_data == map) found_maps++;
	}

	if(!found_maps) {
		switch (map->type) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if (drm_core_has_MTRR(dev)) {
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
			}
			drm_ioremapfree(map->handle, map->size, dev);
			break;
		case _DRM_SHM:
			vfree(map->handle);
			break;
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
		case _DRM_CONSISTENT:
			drm_pci_free(dev, map->size, map->handle, map->offset);
			break;
		}
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
	}
	up(&dev->struct_sem);
	return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order,
					       DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist),
			 DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist),
			 DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer in
 * the requested size order and reallocates the buffer list to accommodate
 * the new buffers.
 */
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
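
	/*
	 * Example of the math above (editor's sketch, not part of the
	 * original source): request->size = 200000 gives order = 18 and
	 * size = 262144; with _DRM_PAGE_ALIGN the alignment stays 262144,
	 * and on 4 KB pages page_order = 18 - 12 = 6, so each allocation
	 * unit spans total = PAGE_SIZE << 6 = 262144 bytes.
	 */
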
	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev,entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __OS_HAS_AGP */

static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request->count, request->size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
				    DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
				   DRM_MEM_PAGES );
	if (!temp_pagelist) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		drm_free( entry->seglist,
			  count * sizeof(*entry->seglist),
			  DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = drm_alloc_pages( page_order, DRM_MEM_DMA );
		if ( !page ) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free( temp_pagelist,
				  (dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist),
				  DRM_MEM_PAGES );
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc( buf->dev_priv_size,
						      DRM_MEM_BUFS );
			if(!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev,entry);
				drm_free( temp_pagelist,
					  (dma->page_count + (count << page_order))
					  * sizeof(*dma->pagelist),
					  DRM_MEM_PAGES );
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
				return -ENOMEM;
			}
			memset( buf->dev_private, 0, buf->dev_priv_size );

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		drm_free( temp_pagelist,
			  (dma->page_count + (count << page_order))
			  * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}
	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec( &dev->buf_alloc );
	return 0;
}

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev,entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}

int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to drm_addbufs_agp(), drm_addbufs_sg(),
 * drm_addbufs_fb() or drm_addbufs_pci() for AGP, scatter-gather, framebuffer
 * or consistent PCI memory respectively.  A userspace usage sketch follows
 * the function body below.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
		ret = drm_addbufs_sg(dev, &request);
	else if ( request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request,
				 sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}

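/*
 * Userspace sketch for the ioctl handled above (editor's illustration, not
 * part of the original source): a DRM client requests DMA buffers of a
 * given size with DRM_IOCTL_ADD_BUFS and reads back how many were actually
 * created.  The offset_into_aperture name is a placeholder:
 *
 *	drm_buf_desc_t req = { .count = 32, .size = 65536,
 *			       .flags = _DRM_AGP_BUFFER,
 *			       .agp_start = offset_into_aperture };
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 */
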
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.  A usage
 * sketch follows the function body below.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}

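/*
 * Typical two-pass usage from userspace (editor's sketch, not part of the
 * original source): call the ioctl once with request.count = 0 to learn how
 * many per-order entries exist, allocate a drm_buf_desc_t array of that
 * size, then call again with request.list pointing at the array to receive
 * the counts, sizes and watermarks.
 */
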
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and rarely used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = drm_order( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		drm_free_buffer( dev, buf );
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies
 * information about each buffer into user space.  The PCI buffers are
 * already mapped on the addbufs_pci() call.  A userspace usage sketch
 * follows the function body below.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		} else {
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}

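/*
 * Userspace sketch for the ioctl handled above (editor's illustration, not
 * part of the original source): the client passes a drm_buf_map_t whose
 * list array has room for at least dma->buf_count entries; on return,
 * request.virtual is the base of the mmap()ed region and list[i].address
 * holds the client address of buffer i.  The my_list name is a placeholder:
 *
 *	drm_buf_map_t bufs = { .count = n, .list = my_list };
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs);
 */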