nv_accel.c

/***************************************************************************\
|*                                                                           *|
|*  Copyright 1993-2003 NVIDIA, Corporation. All rights reserved.            *|
|*                                                                           *|
|*  NOTICE TO USER: The source code is copyrighted under U.S. and            *|
|*  international laws. Users and possessors of this source code are         *|
|*  hereby granted a nonexclusive, royalty-free copyright license to         *|
|*  use this code in individual and commercial software.                     *|
|*                                                                           *|
|*  Any use of this source code must include, in the user documenta-         *|
|*  tion and internal comments to the code, notices to the end user          *|
|*  as follows:                                                              *|
|*                                                                           *|
|*  Copyright 1993-2003 NVIDIA, Corporation. All rights reserved.            *|
|*                                                                           *|
|*  NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY        *|
|*  OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS"              *|
|*  WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA, CORPOR-         *|
|*  ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE,          *|
|*  INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE-        *|
|*  MENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL            *|
|*  NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI-           *|
|*  DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE-          *|
|*  SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION          *|
|*  OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF         *|
|*  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.        *|
|*                                                                           *|
|*  U.S. Government End Users. This source code is a "commercial             *|
|*  item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995),            *|
|*  consisting of "commercial computer software" and "commercial             *|
|*  computer software documentation," as such terms are used in              *|
|*  48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern-         *|
|*  ment only as a commercial end item. Consistent with 48 C.F.R.            *|
|*  12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995),          *|
|*  all U.S. Government End Users acquire the source code with only          *|
|*  those rights set forth herein.                                           *|
|*                                                                           *|
\***************************************************************************/

/*
 * GPL Licensing Note - According to Mark Vojkovich, author of the Xorg/
 * XFree86 'nv' driver, this source code is provided under MIT-style licensing
 * where the source code is provided "as is" without warranty of any kind.
 * The only usage restriction is for the copyright notices to be retained
 * whenever code is used.
 *
 * Antonino Daplas <adaplas@pol.net> 2005-03-11
 */

#include <linux/fb.h>

#include "nv_type.h"
#include "nv_proto.h"
#include "nv_dma.h"
#include "nv_local.h"

/*
 * There is a HW race condition with videoram command buffers.
 * You can't jump to the location of your put offset.  We write put
 * at the jump offset + SKIPS dwords with noop padding in between
 * to solve this problem.
 */
#define SKIPS 8
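
/*
 * Hardware raster-op codes indexed by the fbdev ROP value (rect->rop).
 * NVCopyROP_PM holds the variants used when a plane mask is in effect,
 * i.e. when NVSetRopSolid() below loads the mask through the pattern
 * unit.
 */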
static const int NVCopyROP[16] = {
        0xCC,                   /* copy */
        0x55                    /* invert */
};

static const int NVCopyROP_PM[16] = {
        0xCA,                   /* copy */
        0x5A,                   /* invert */
};
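
/*
 * Busy-wait until the DMA GET pointer catches up with our last PUT,
 * i.e. the FIFO has consumed everything submitted so far.  If the
 * bounded spin expires, flag the engine as locked up so later calls
 * fall back to the software cfb_* paths.
 */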
static inline void NVFlush(struct nvidia_par *par)
{
        int count = 1000000000;

        while (--count && READ_GET(par) != par->dmaPut) ;

        if (!count) {
                printk("nvidiafb: DMA Flush lockup\n");
                par->lockup = 1;
        }
}
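
/*
 * Busy-wait until the graphics engine goes idle (the PGRAPH register at
 * offset 0x0700 reads as zero), with the same bounded spin and lockup
 * fallback as NVFlush().
 */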
static inline void NVSync(struct nvidia_par *par)
{
        int count = 1000000000;

        while (--count && NV_RD32(par->PGRAPH, 0x0700)) ;

        if (!count) {
                printk("nvidiafb: DMA Sync lockup\n");
                par->lockup = 1;
        }
}
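
/* Publish commands queued since the last kickoff by advancing PUT. */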
static void NVDmaKickoff(struct nvidia_par *par)
{
        if (par->dmaCurrent != par->dmaPut) {
                par->dmaPut = par->dmaCurrent;
                WRITE_PUT(par, par->dmaPut);
        }
}
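
/*
 * Make room for at least 'size' dwords in the command buffer.  When PUT
 * is ahead of GET and the tail of the buffer is too small, emit a jump
 * command (0x20000000, targeting the start of the buffer) and wrap back
 * to offset SKIPS, per the workaround described above; otherwise the
 * free space is simply whatever lies between the current position and
 * GET.
 */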
static void NVDmaWait(struct nvidia_par *par, int size)
{
        int dmaGet;
        int count = 1000000000, cnt;
        size++;

        while (par->dmaFree < size && --count && !par->lockup) {
                dmaGet = READ_GET(par);

                if (par->dmaPut >= dmaGet) {
                        par->dmaFree = par->dmaMax - par->dmaCurrent;
                        if (par->dmaFree < size) {
                                NVDmaNext(par, 0x20000000);
                                if (dmaGet <= SKIPS) {
                                        if (par->dmaPut <= SKIPS)
                                                WRITE_PUT(par, SKIPS + 1);
                                        cnt = 1000000000;
                                        do {
                                                dmaGet = READ_GET(par);
                                        } while (--cnt && dmaGet <= SKIPS);
                                        if (!cnt) {
                                                printk("DMA Get lockup\n");
                                                par->lockup = 1;
                                        }
                                }
                                WRITE_PUT(par, SKIPS);
                                par->dmaCurrent = par->dmaPut = SKIPS;
                                par->dmaFree = dmaGet - (SKIPS + 1);
                        }
                } else
                        par->dmaFree = dmaGet - par->dmaCurrent - 1;
        }

        if (!count) {
                printk("DMA Wait Lockup\n");
                par->lockup = 1;
        }
}
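
/*
 * Load the two pattern colors and the two 32-bit words of the
 * monochrome pattern.  NVSetRopSolid() programs the plane mask through
 * here when write masking is needed.
 */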
static void NVSetPattern(struct nvidia_par *par, u32 clr0, u32 clr1,
                         u32 pat0, u32 pat1)
{
        NVDmaStart(par, PATTERN_COLOR_0, 4);
        NVDmaNext(par, clr0);
        NVDmaNext(par, clr1);
        NVDmaNext(par, pat0);
        NVDmaNext(par, pat1);
}
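
/*
 * Select the hardware raster operation for subsequent fills.  A partial
 * plane mask is loaded via the pattern unit and the pattern-aware ROP
 * table is used; currentRop caches the last setting (offset by 32 for
 * the plane-mask variants) to avoid redundant ROP_SET commands.
 */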
static void NVSetRopSolid(struct nvidia_par *par, u32 rop, u32 planemask)
{
        if (planemask != ~0) {
                NVSetPattern(par, 0, planemask, ~0, ~0);
                if (par->currentRop != (rop + 32)) {
                        NVDmaStart(par, ROP_SET, 1);
                        NVDmaNext(par, NVCopyROP_PM[rop]);
                        par->currentRop = rop + 32;
                }
        } else if (par->currentRop != rop) {
                if (par->currentRop >= 16)
                        NVSetPattern(par, ~0, ~0, ~0, ~0);
                NVDmaStart(par, ROP_SET, 1);
                NVDmaNext(par, NVCopyROP[rop]);
                par->currentRop = rop;
        }
}
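
/* Program the clip rectangle as a packed (y, x) origin and (h, w) extent. */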
static void NVSetClippingRectangle(struct fb_info *info, int x1, int y1,
                                   int x2, int y2)
{
        struct nvidia_par *par = info->par;
        int h = y2 - y1 + 1;
        int w = x2 - x1 + 1;

        NVDmaStart(par, CLIP_POINT, 2);
        NVDmaNext(par, (y1 << 16) | x1);
        NVDmaNext(par, (h << 16) | w);
}
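
/*
 * (Re)initialize the acceleration engine: place the DMA command buffer
 * just past the usable framebuffer, prime it with NOPs and what appear
 * to be the writes that bind the drawing objects to their subchannels,
 * then set the surface, pattern, rect and line formats for the current
 * depth and reset the cached ROP and clip state.
 */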
void NVResetGraphics(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        u32 surfaceFormat, patternFormat, rectFormat, lineFormat;
        int pitch, i;

        pitch = info->fix.line_length;

        par->dmaBase = (u32 __iomem *) (&par->FbStart[par->FbUsableSize]);

        for (i = 0; i < SKIPS; i++)
                NV_WR32(&par->dmaBase[i], 0, 0x00000000);

        NV_WR32(&par->dmaBase[0x0 + SKIPS], 0, 0x00040000);
        NV_WR32(&par->dmaBase[0x1 + SKIPS], 0, 0x80000010);
        NV_WR32(&par->dmaBase[0x2 + SKIPS], 0, 0x00042000);
        NV_WR32(&par->dmaBase[0x3 + SKIPS], 0, 0x80000011);
        NV_WR32(&par->dmaBase[0x4 + SKIPS], 0, 0x00044000);
        NV_WR32(&par->dmaBase[0x5 + SKIPS], 0, 0x80000012);
        NV_WR32(&par->dmaBase[0x6 + SKIPS], 0, 0x00046000);
        NV_WR32(&par->dmaBase[0x7 + SKIPS], 0, 0x80000013);
        NV_WR32(&par->dmaBase[0x8 + SKIPS], 0, 0x00048000);
        NV_WR32(&par->dmaBase[0x9 + SKIPS], 0, 0x80000014);
        NV_WR32(&par->dmaBase[0xA + SKIPS], 0, 0x0004A000);
        NV_WR32(&par->dmaBase[0xB + SKIPS], 0, 0x80000015);
        NV_WR32(&par->dmaBase[0xC + SKIPS], 0, 0x0004C000);
        NV_WR32(&par->dmaBase[0xD + SKIPS], 0, 0x80000016);
        NV_WR32(&par->dmaBase[0xE + SKIPS], 0, 0x0004E000);
        NV_WR32(&par->dmaBase[0xF + SKIPS], 0, 0x80000017);

        par->dmaPut = 0;
        par->dmaCurrent = 16 + SKIPS;
        par->dmaMax = 8191;
        par->dmaFree = par->dmaMax - par->dmaCurrent;

        switch (info->var.bits_per_pixel) {
        case 32:
        case 24:
                surfaceFormat = SURFACE_FORMAT_DEPTH24;
                patternFormat = PATTERN_FORMAT_DEPTH24;
                rectFormat = RECT_FORMAT_DEPTH24;
                lineFormat = LINE_FORMAT_DEPTH24;
                break;
        case 16:
                surfaceFormat = SURFACE_FORMAT_DEPTH16;
                patternFormat = PATTERN_FORMAT_DEPTH16;
                rectFormat = RECT_FORMAT_DEPTH16;
                lineFormat = LINE_FORMAT_DEPTH16;
                break;
        default:
                surfaceFormat = SURFACE_FORMAT_DEPTH8;
                patternFormat = PATTERN_FORMAT_DEPTH8;
                rectFormat = RECT_FORMAT_DEPTH8;
                lineFormat = LINE_FORMAT_DEPTH8;
                break;
        }

        NVDmaStart(par, SURFACE_FORMAT, 4);
        NVDmaNext(par, surfaceFormat);
        NVDmaNext(par, pitch | (pitch << 16));
        NVDmaNext(par, 0);
        NVDmaNext(par, 0);

        NVDmaStart(par, PATTERN_FORMAT, 1);
        NVDmaNext(par, patternFormat);

        NVDmaStart(par, RECT_FORMAT, 1);
        NVDmaNext(par, rectFormat);

        NVDmaStart(par, LINE_FORMAT, 1);
        NVDmaNext(par, lineFormat);

        par->currentRop = ~0;   /* set to something invalid */
        NVSetRopSolid(par, ROP_COPY, ~0);

        NVSetClippingRectangle(info, 0, 0, info->var.xres_virtual,
                               info->var.yres_virtual);

        NVDmaKickoff(par);
}
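
/*
 * Byte bit-reversal lookup table, presumably consumed by the
 * reverse_order() helper used in nvidiafb_mono_color_expand() to put
 * monochrome bitmap data into the bit order the hardware expects.
 */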
u8 byte_rev[256] = {
        0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
        0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
        0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
        0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
        0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
        0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
        0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
        0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
        0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
        0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
        0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
        0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
        0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
        0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
        0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
        0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
        0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
        0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
        0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
        0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
        0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
        0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
        0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
        0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
        0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
        0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
        0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
        0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
        0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
        0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
        0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
        0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
};
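
/*
 * fb_sync hook: drain the command FIFO and wait for the engine to go
 * idle so the CPU may safely access the framebuffer directly.
 */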
int nvidiafb_sync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return 0;

        if (!par->lockup)
                NVFlush(par);

        if (!par->lockup)
                NVSync(par);

        return 0;
}
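
/*
 * fb_copyarea hook: screen-to-screen blit, falling back to the generic
 * cfb_copyarea() if the engine has locked up.
 */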
void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;
        if (par->lockup)
                return cfb_copyarea(info, region);

        NVDmaStart(par, BLIT_POINT_SRC, 3);
        NVDmaNext(par, (region->sy << 16) | region->sx);
        NVDmaNext(par, (region->dy << 16) | region->dx);
        NVDmaNext(par, (region->height << 16) | region->width);

        NVDmaKickoff(par);
}
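
/*
 * fb_fillrect hook: solid rectangle fill.  Above 8 bpp the color comes
 * from the pseudo palette; a non-copy ROP is selected for the fill and
 * restored to ROP_COPY afterwards.
 */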
void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        struct nvidia_par *par = info->par;
        u32 color;

        if (info->state != FBINFO_STATE_RUNNING)
                return;
        if (par->lockup)
                return cfb_fillrect(info, rect);

        if (info->var.bits_per_pixel == 8)
                color = rect->color;
        else
                color = ((u32 *) info->pseudo_palette)[rect->color];

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(par, rect->rop, ~0);

        NVDmaStart(par, RECT_SOLID_COLOR, 1);
        NVDmaNext(par, color);

        NVDmaStart(par, RECT_SOLID_RECTS(0), 2);
        NVDmaNext(par, (rect->dx << 16) | rect->dy);
        NVDmaNext(par, (rect->width << 16) | rect->height);

        NVDmaKickoff(par);

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(par, ROP_COPY, ~0);
}
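
/*
 * Expand a 1 bpp image using the hardware two-color expansion object:
 * program the clip box, colors and geometry (the transfer width rounded
 * up to a multiple of 32 bits), then stream the bitmap dwords in
 * RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS-sized bursts, bit-reversing
 * each dword on the way.
 */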
static void nvidiafb_mono_color_expand(struct fb_info *info,
                                       const struct fb_image *image)
{
        struct nvidia_par *par = info->par;
        u32 fg, bg, mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        u32 dsize, width, *data = (u32 *) image->data, tmp;
        int j, k = 0;

        width = (image->width + 31) & ~31;
        dsize = (width * image->height) >> 5;

        if (info->var.bits_per_pixel == 8) {
                fg = image->fg_color | mask;
                bg = image->bg_color | mask;
        } else {
                fg = ((u32 *) info->pseudo_palette)[image->fg_color] | mask;
                bg = ((u32 *) info->pseudo_palette)[image->bg_color] | mask;
        }

        NVDmaStart(par, RECT_EXPAND_TWO_COLOR_CLIP, 7);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));
        NVDmaNext(par, ((image->dy + image->height) << 16) |
                  ((image->dx + image->width) & 0xffff));
        NVDmaNext(par, bg);
        NVDmaNext(par, fg);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));

        while (dsize >= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS) {
                NVDmaStart(par, RECT_EXPAND_TWO_COLOR_DATA(0),
                           RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS);

                for (j = RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }

                dsize -= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS;
        }

        if (dsize) {
                NVDmaStart(par, RECT_EXPAND_TWO_COLOR_DATA(0), dsize);

                for (j = dsize; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }
        }

        NVDmaKickoff(par);
}
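
/*
 * fb_imageblit hook: accelerate monochrome (depth 1) images; anything
 * else, or a locked-up engine, goes through the generic cfb_imageblit().
 */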
void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (image->depth == 1 && !par->lockup)
                nvidiafb_mono_color_expand(info, image);
        else
                cfb_imageblit(info, image);
}