saa7146_hlp.c

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <media/saa7146_vv.h>

static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
	/* clear out the necessary bits */
	*clip_format &= 0x0000ffff;
	/* set these bits new */
	*clip_format |= (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16));
}
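/*
 * Note (added, not part of the original driver source): the three nibbles of
 * the format code above are scattered into the upper half of
 * CLIP_FORMAT_CTRL, shifted to bit 30, bit 24 and bit 16 respectively, while
 * the lower word is preserved.  As an illustration, palette == 0x123 would
 * OR in (0x1 << 30) | (0x3 << 24) | (0x2 << 16) = 0x43020000.
 */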
static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
{
	*hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	*hps_ctrl |= (source << 30) | (sync << 28);
}

static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	*hps_h_scale &= ~(MASK_B0 | 0xf00);
	*hps_h_scale |= (hxo << 0);

	*hps_ctrl &= ~(MASK_W0 | MASK_B2);
	*hps_ctrl |= (hyo << 12);
}
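/*
 * Added note (not in the original source): calculate_hxo_and_hyo() copies the
 * horizontal and vertical offsets of the current video standard into the
 * scaler registers -- hxo goes into the low bits of HPS_H_SCALE, hyo into
 * HPS_CTRL at bit 12 -- so the scaled picture starts at the active video
 * area of that standard.
 */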
/* helper functions for the calculation of the horizontal- and vertical
   scaling registers, clip-format-register etc ...
   these functions take pointers to the (most-likely read-out
   original-values) and manipulate them according to the requested
   changes. */

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8},
	{0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8},
	{0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8},
	{0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8},
	{0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8},
	{0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8},
	{0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8},
	{0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8},
	{0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8},
	{0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}
};

/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0 };

/* calculate horizontal scale registers */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci = 0;
	/* vertical scale & gain */
	u32 pfuv = 0;
	/* helper variables */
	u32 h_atten = 0, i = 0;

	if (0 == out_x) {
		return -EINVAL;
	}

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xpsc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						... */
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	} else {
		/* zooming */
		xpsc = 1;
	}

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if (0 != flip_lr) {
		/* set vanity bit */
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384)
			xpsc++;
	}
	/* if zooming is wanted, number of pixels after
	   horizontal prescaling must be < 768 */
	else {
		while (in_x / xpsc >= 768)
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if (xpsc > 64)
		xpsc = 64;

	/* keep xacm clear */
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[(xpsc - 1) < 63 ? (xpsc - 1) : 63].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ((in_x == out_x) && (1 == xpsc))
		xsci = 0x400;
	else
		xsci = ((1024 * in_x) / (out_x * xpsc)) + xpsc;

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if (0x400 == xsci)
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if (1 == xpsc) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[(xpsc - 1) < 63 ? (xpsc - 1) : 63].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}
		dcgx = i;
	}

	/* the horizontal scaling increment controls the UV filter
	   to reduce the bandwidth to improve the display quality,
	   so set it ... */
	if (xsci == 0x400)
		pfuv = 0x00;
	else if (xsci < 0x600)
		pfuv = 0x01;
	else if (xsci < 0x680)
		pfuv = 0x11;
	else if (xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	*hps_v_gain &= MASK_W0 | MASK_B2;
	*hps_v_gain |= (pfuv << 24);

	*hps_h_scale &= ~(MASK_W1 | 0xf000);
	*hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale |= (dcgx << 27) | ((xpsc - 1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}
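/*
 * Worked example (added for illustration, not part of the original driver):
 * scaling 720 input pixels down to 360 without mirroring gives
 *   xpsc = 720 / 360 = 2            (no further bump, 720 / 2 < 768)
 *   cxy  = cxuv = hps_h_coeff_tab[1].hps_coeff = 0x02
 *   xsci = (1024 * 720) / (360 * 2) + 2 = 1026, so xim = 0
 *   pfuv = 0x01                     (0x400 < xsci < 0x600)
 *   h_atten = 4, hence dcgx = 2     (first h_attenuation[] entry >= 4)
 */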
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
	{0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8},
	{0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16},
	{0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16},
	{0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32},
	{0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32},
	{0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32},
	{0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64},
	{0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64},
	{0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64},
	{0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0 };

/* calculate vertical scale registers */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;
	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if (in_y < out_y) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */
	if (V4L2_FIELD_HAS_BOTH(field)) {
		if (2 * out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if (4 * out_y >= in_y) {
			lpi = 1;
		}
		out_y *= 2;
	}

	if (0 != lpi) {
		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if (in_y > out_y)
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);
	} else {
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						... */
		if (ysci < 512) {
			yacl = 0;
		} else {
			yacl = (ysci / (1024 - ysci));
		}

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[(yacl < 63 ? yacl : 63)].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[(yacl < 63 ? yacl : 63)].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}
		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8) | (ype << 1);

	*hps_v_gain &= ~(MASK_W0 | MASK_B2);
	*hps_v_gain |= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}
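/*
 * Worked example (added for illustration, not part of the original driver):
 * scaling 576 interlaced input lines down to 288 takes the LPI branch
 * (2 * 288 >= 576) and gives
 *   ysci = (1024 * 576) / (288 + 1) - 1024 = 1016
 *   ype  = 1016 / 16 = 63,  ypo = 63 + 1016 / 64 = 78
 *   yacm = yacl = dcgy = 0, cya_cyb = 0x00ff
 */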
/* simple bubble-sort algorithm with duplicate elimination */
static int sort_and_eliminate(u32* values, int* count)
{
	int low = 0, high = 0, top = 0, temp = 0;
	int cur = 0, next = 0;

	/* sanity checks */
	if ((0 > *count) || (NULL == values)) {
		return -EINVAL;
	}

	/* bubble sort the first @count items of the array @values */
	for (top = *count; top > 0; top--) {
		for (low = 0, high = 1; high < top; low++, high++) {
			if (values[low] > values[high]) {
				temp = values[low];
				values[low] = values[high];
				values[high] = temp;
			}
		}
	}

	/* remove duplicate items */
	for (cur = 0, next = 1; next < *count; next++) {
		if (values[cur] != values[next])
			values[++cur] = values[next];
	}

	*count = cur + 1;

	return 0;
}
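/*
 * Example (added, not part of the original source): for values = {5, 1, 5, 3}
 * and *count == 4, the array is first sorted to {1, 3, 5, 5}, the duplicate 5
 * is dropped, and the function returns with values = {1, 3, 5, ...} and
 * *count == 3.
 */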
static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct saa7146_fh *fh,
	struct saa7146_video_dma *vdma2, u32* clip_format, u32* arbtr_ctrl, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;
	__le32 *clipping = vv->d_clipping.cpu_addr;

	int width = fh->ov.win.w.width;
	int height = fh->ov.win.w.height;
	int clipcount = fh->ov.nclips;

	u32 line_list[32];
	u32 pixel_list[32];
	int numdwords = 0;

	int i = 0, j = 0;
	int cnt_line = 0, cnt_pixel = 0;

	int x[32], y[32], w[32], h[32];

	/* clear out memory */
	memset(&line_list[0], 0x00, sizeof(u32) * 32);
	memset(&pixel_list[0], 0x00, sizeof(u32) * 32);
	memset(clipping, 0x00, SAA7146_CLIPPING_MEM);

	/* fill the line and pixel-lists */
	for (i = 0; i < clipcount; i++) {
		int l = 0, r = 0, t = 0, b = 0;

		x[i] = fh->ov.clips[i].c.left;
		y[i] = fh->ov.clips[i].c.top;
		w[i] = fh->ov.clips[i].c.width;
		h[i] = fh->ov.clips[i].c.height;

		if (w[i] < 0) {
			x[i] += w[i]; w[i] = -w[i];
		}
		if (h[i] < 0) {
			y[i] += h[i]; h[i] = -h[i];
		}
		if (x[i] < 0) {
			w[i] += x[i]; x[i] = 0;
		}
		if (y[i] < 0) {
			h[i] += y[i]; y[i] = 0;
		}
		if (0 != vv->vflip) {
			y[i] = height - y[i] - h[i];
		}

		l = x[i];
		r = x[i] + w[i];
		t = y[i];
		b = y[i] + h[i];

		/* insert left/right coordinates */
		pixel_list[2 * i] = min_t(int, l, width);
		pixel_list[(2 * i) + 1] = min_t(int, r, width);
		/* insert top/bottom coordinates */
		line_list[2 * i] = min_t(int, t, height);
		line_list[(2 * i) + 1] = min_t(int, b, height);
	}

	/* sort and eliminate lists */
	cnt_line = cnt_pixel = 2 * clipcount;
	sort_and_eliminate(&pixel_list[0], &cnt_pixel);
	sort_and_eliminate(&line_list[0], &cnt_line);

	/* calculate the number of used u32s */
	numdwords = max_t(int, (cnt_line + 1), (cnt_pixel + 1)) * 2;
	numdwords = max_t(int, 4, numdwords);
	numdwords = min_t(int, 64, numdwords);

	/* fill up cliptable */
	for (i = 0; i < cnt_pixel; i++) {
		clipping[2 * i] |= cpu_to_le32(pixel_list[i] << 16);
	}
	for (i = 0; i < cnt_line; i++) {
		clipping[(2 * i) + 1] |= cpu_to_le32(line_list[i] << 16);
	}

	/* fill up cliptable with the display infos */
	for (j = 0; j < clipcount; j++) {
		for (i = 0; i < cnt_pixel; i++) {
			if (x[j] < 0)
				x[j] = 0;
			if (pixel_list[i] < (x[j] + w[j])) {
				if (pixel_list[i] >= x[j]) {
					clipping[2 * i] |= cpu_to_le32(1 << j);
				}
			}
		}
		for (i = 0; i < cnt_line; i++) {
			if (y[j] < 0)
				y[j] = 0;
			if (line_list[i] < (y[j] + h[j])) {
				if (line_list[i] >= y[j]) {
					clipping[(2 * i) + 1] |= cpu_to_le32(1 << j);
				}
			}
		}
	}

	/* adjust arbitration control register */
	*arbtr_ctrl &= 0xffff00ff;
	*arbtr_ctrl |= 0x00001c00;

	vdma2->base_even = vv->d_clipping.dma_handle;
	vdma2->base_odd = vv->d_clipping.dma_handle;
	vdma2->prot_addr = vv->d_clipping.dma_handle + ((sizeof(u32)) * (numdwords));
	vdma2->base_page = 0x04;
	vdma2->pitch = 0x00;
	vdma2->num_line_byte = (0 << 16 | (sizeof(u32)) * (numdwords - 1));

	/* set clipping-mode. this depends on the field(s) used */
	*clip_format &= 0xfffffff7;
	if (V4L2_FIELD_HAS_BOTH(field)) {
		*clip_format |= 0x00000008;
	} else {
		*clip_format |= 0x00000000;
	}
}
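/*
 * Added note (not in the original source): the clipping list built above is a
 * sequence of dwords that video DMA2 feeds to the clipper.  Even dwords carry
 * one sorted horizontal boundary in bits 31:16 plus one bit per clip
 * rectangle whose horizontal extent covers that boundary; odd dwords carry
 * the vertical boundaries in the same layout.
 */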
/* disable clipping */
static void saa7146_disable_clipping(struct saa7146_dev *dev)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* mask out relevant bits (=lower word) */
	clip_format &= MASK_W1;

	/* upload clipping-registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* disable video dma2 */
	saa7146_write(dev, MC1, MASK_21);
}

static void saa7146_set_clipping_rect(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	enum v4l2_field field = fh->ov.win.field;
	struct saa7146_video_dma vdma2;
	u32 clip_format;
	u32 arbtr_ctrl;

	/* check clipcount, disable clipping if clipcount == 0 */
	if (fh->ov.nclips == 0) {
		saa7146_disable_clipping(dev);
		return;
	}

	clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);
	arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);

	calculate_clipping_registers_rect(dev, fh, &vdma2, &clip_format, &arbtr_ctrl, field);

	/* set clipping format */
	clip_format &= 0xffff0008;
	clip_format |= (SAA7146_CLIPPING_RECT << 4);

	/* prepare video dma2 */
	saa7146_write(dev, BASE_EVEN2, vdma2.base_even);
	saa7146_write(dev, BASE_ODD2, vdma2.base_odd);
	saa7146_write(dev, PROT_ADDR2, vdma2.prot_addr);
	saa7146_write(dev, BASE_PAGE2, vdma2.base_page);
	saa7146_write(dev, PITCH2, vdma2.pitch);
	saa7146_write(dev, NUM_LINE_BYTE2, vdma2.num_line_byte);

	/* prepare the rest */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);

	/* upload clip_control-register, clipping-registers, enable video dma2 */
	saa7146_write(dev, MC2, (MASK_05 | MASK_21 | MASK_03 | MASK_19));
	saa7146_write(dev, MC1, (MASK_05 | MASK_21));
}

static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/
	calculate_v_scale_registers(dev, field, vv->standard->v_field * 2, height, &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_ctrl = 0;
	hps_h_prescale = 0; /* all bits get set in the function */
	hps_h_scale = 0;
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE, hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN, hps_v_gain);
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE, hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE, hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22));
}

/* calculate the new memory offsets for a desired position */
static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);

	int b_depth = vv->ov_fmt->depth;
	int b_bpl = vv->ov_fb.fmt.bytesperline;
	/* The unsigned long cast is to remove a 64-bit compile warning since
	   it looks like a 64-bit address is cast to a 32-bit value, even
	   though the base pointer is really a 32-bit physical address that
	   goes into a 32-bit DMA register.
	   FIXME: might not work on some 64-bit platforms, but see the FIXME
	   in struct v4l2_framebuffer (videodev2.h) for that.
	 */
	u32 base = (u32)(unsigned long)vv->ov_fb.base;

	struct saa7146_video_dma vdma1;

	/* calculate memory offsets for picture, look if we shall top-down-flip */
	vdma1.pitch = 2 * b_bpl;
	if (0 == vv->vflip) {
		vdma1.base_even = base + (w_y * (vdma1.pitch / 2)) + (w_x * (b_depth / 8));
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_even + (w_height * (vdma1.pitch / 2));
	} else {
		vdma1.base_even = base + ((w_y + w_height) * (vdma1.pitch / 2)) + (w_x * (b_depth / 8));
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_odd - (w_height * (vdma1.pitch / 2));
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
	}

	vdma1.base_page = sfmt->swap;
	vdma1.num_line_byte = (vv->standard->v_field << 16) + vv->standard->h_pixels;

	saa7146_write_out_dma(dev, 1, &vdma1);
}
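/*
 * Added note (not in the original source): for single-field modes the pitch
 * is halved so the one captured field fills consecutive framebuffer lines
 * instead of every other line, and base_odd is parked at the protection
 * address (or swapped with base_even for V4L2_FIELD_BOTTOM), presumably so
 * the unused field transfer stops immediately at the protection boundary.
 * A negative pitch implements the vertical flip.
 */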
static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* call helper function */
	calculate_output_format_register(dev, palette, &clip_format);

	/* update the hps registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}

/* select input-source */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	/* read old state */
	hps_ctrl = saa7146_read(dev, HPS_CTRL);
	hps_ctrl &= ~(MASK_31 | MASK_30 | MASK_28);
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);

int saa7146_enable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;

	saa7146_set_window(dev, fh->ov.win.w.width, fh->ov.win.w.height, fh->ov.win.field);
	saa7146_set_position(dev, fh->ov.win.w.left, fh->ov.win.w.top, fh->ov.win.w.height, fh->ov.win.field, vv->ov_fmt->pixelformat);
	saa7146_set_output_format(dev, vv->ov_fmt->trans);
	saa7146_set_clipping_rect(fh);

	/* enable video dma1 */
	saa7146_write(dev, MC1, (MASK_06 | MASK_22));
	return 0;
}

void saa7146_disable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;

	/* disable clipping + video dma1 */
	saa7146_disable_clipping(dev);
	saa7146_write(dev, MC1, MASK_22);
}

void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma)
{
	int where = 0;

	if (which < 1 || which > 3) {
		return;
	}

	/* calculate starting address */
	where = (which - 1) * 0x18;

	saa7146_write(dev, where, vdma->base_odd);
	saa7146_write(dev, where + 0x04, vdma->base_even);
	saa7146_write(dev, where + 0x08, vdma->prot_addr);
	saa7146_write(dev, where + 0x0c, vdma->pitch);
	saa7146_write(dev, where + 0x10, vdma->base_page);
	saa7146_write(dev, where + 0x14, vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02 << (which - 1)) | (MASK_18 << (which - 1)));
	/*
	printk("vdma%d.base_even:     0x%08x\n", which, vdma->base_even);
	printk("vdma%d.base_odd:      0x%08x\n", which, vdma->base_odd);
	printk("vdma%d.prot_addr:     0x%08x\n", which, vdma->prot_addr);
	printk("vdma%d.base_page:     0x%08x\n", which, vdma->base_page);
	printk("vdma%d.pitch:         0x%08x\n", which, vdma->pitch);
	printk("vdma%d.num_line_byte: 0x%08x\n", which, vdma->num_line_byte);
	*/
}
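/*
 * Added note (not in the original source): each of the three video DMA
 * channels occupies a block of six consecutive 32-bit registers spaced
 * 0x18 bytes apart, so e.g. which == 2 writes its base_odd, base_even,
 * prot_addr, pitch, base_page and num_line_byte values at offsets
 * 0x18..0x2c; the matching MC2 bit pair then triggers the shadow-RAM
 * upload for that channel.
 */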
static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	int bytesperline = buf->fmt->bytesperline;
	enum v4l2_field field = buf->fmt->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	if (bytesperline != 0) {
		vdma1.pitch = bytesperline * 2;
	} else {
		vdma1.pitch = (width * depth * 2) / 8;
	}
	vdma1.num_line_byte = ((vv->standard->v_field << 16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1 | sfmt->swap;

	if (0 != vv->vflip) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = buf->pt[0].offset + (vdma1.pitch / 2) * height;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = buf->pt[0].offset + (vdma1.pitch / 2) * height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		if (vv->last_field == V4L2_FIELD_TOP) {
			vdma1.base_odd = vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
			vdma1.base_odd = vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}

static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width;
	vdma3->pitch = width;

	/* fixme: look at bytesperline! */

	if (0 != vv->vflip) {
		vdma2->prot_addr = buf->pt[1].offset;
		vdma2->base_even = ((vdma2->pitch / 2) * height) + buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[2].offset;
		vdma3->base_even = ((vdma3->pitch / 2) * height) + buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch / 2);
		vdma3->prot_addr = (vdma3->pitch / 2) * height + buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch / 2);
		vdma2->prot_addr = (vdma2->pitch / 2) * height + buf->pt[1].offset;
	}

	return 0;
}

static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width / 2;
	vdma3->pitch = width / 2;

	if (0 != vv->vflip) {
		vdma2->prot_addr = buf->pt[2].offset;
		vdma2->base_even = ((vdma2->pitch / 2) * height) + buf->pt[2].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[1].offset;
		vdma3->base_even = ((vdma3->pitch / 2) * height) + buf->pt[1].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr = (vdma3->pitch / 2) * height + buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr = (vdma2->pitch / 2) * height + buf->pt[1].offset;
	}
	return 0;
}
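/*
 * Added note (not in the original source): both helpers above program the
 * chroma DMA channels only.  calc_planar_422() uses a pitch of width and
 * staggers the odd field by pitch/2, calc_planar_420() uses width/2 and
 * staggers the odd field by a full pitch; in both cases prot_addr bounds the
 * transfer at (pitch/2)*height past the plane's page-table offset, or at the
 * plane offset itself when the picture is vertically flipped.
 */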
static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	enum v4l2_field field = buf->fmt->field;

	BUG_ON(0 == buf->pt[0].dma);
	BUG_ON(0 == buf->pt[1].dma);
	BUG_ON(0 == buf->pt[2].dma);

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here?. The offsets are
	   most likely wrong, this version here only works for page-aligned
	   buffers, modifications to the pagetable-functions are necessary...*/

	vdma1.pitch = width * 2;
	vdma1.num_line_byte = ((vv->standard->v_field << 16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1;

	if (0 != vv->vflip) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = ((vdma1.pitch / 2) * height) + buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = (vdma1.pitch / 2) * height + buf->pt[0].offset;
	}

	vdma2.num_line_byte = 0; /* unused */
	vdma2.base_page = buf->pt[1].dma | ME1;

	vdma3.num_line_byte = 0; /* unused */
	vdma3.base_page = buf->pt[2].dma | ME1;

	switch (sfmt->depth) {
	case 12: {
		calc_planar_420(vv, buf, &vdma2, &vdma3);
		break;
	}
	case 16: {
		calc_planar_422(vv, buf, &vdma2, &vdma3);
		break;
	}
	default: {
		return -1;
	}
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	if ((sfmt->flags & FORMAT_BYTE_SWAP) != 0) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}

static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;
	int count = 0;

	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set */
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2 / 4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
	WRITE_RPS0(MASK_06 | MASK_22);			/* => mask */
	WRITE_RPS0(MASK_06 | MASK_22);			/* => values */

	if (0 != planar) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_05 | MASK_21);		/* => mask */
		WRITE_RPS0(MASK_05 | MASK_21);		/* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_04 | MASK_20);		/* => mask */
		WRITE_RPS0(MASK_04 | MASK_20);		/* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if (vv->last_field == V4L2_FIELD_INTERLACED) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if (vv->last_field == V4L2_FIELD_TOP) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
	WRITE_RPS0(MASK_22 | MASK_06);			/* => mask */
	WRITE_RPS0(MASK_22);				/* => values */

	if (0 != planar) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_05 | MASK_21);		/* => mask */
		WRITE_RPS0(MASK_21);			/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_04 | MASK_20);		/* => mask */
		WRITE_RPS0(MASK_20);			/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}
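/*
 * Added note (not in the original source): each WRITE_RPS0() appends one
 * command dword to the RPS0 program in memory, so the block above builds a
 * small sequencer program: wait for the field toggle only while the
 * "beginning of streaming" flag (rps register 0 in MC2) is not yet set, then
 * set it, switch the video DMAs on, wait for the requested field(s) to
 * complete, switch the DMAs off again, raise an interrupt and stop.
 */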
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);
	struct saa7146_vv *vv = dev->vv_data;
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if (0 == vdma1_prot_addr) {
		/* clear out beginning of streaming bit (rps register 0) */
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27);
	}

	saa7146_set_window(dev, buf->fmt->width, buf->fmt->height, buf->fmt->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	if (vv->last_field == V4L2_FIELD_INTERLACED) {
	} else if (vv->last_field == V4L2_FIELD_TOP) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if (0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev, 1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev, 0);
	}

	/*
	printk("vdma%d.base_even:     0x%08x\n", 1, saa7146_read(dev, BASE_EVEN1));
	printk("vdma%d.base_odd:      0x%08x\n", 1, saa7146_read(dev, BASE_ODD1));
	printk("vdma%d.prot_addr:     0x%08x\n", 1, saa7146_read(dev, PROT_ADDR1));
	printk("vdma%d.base_page:     0x%08x\n", 1, saa7146_read(dev, BASE_PAGE1));
	printk("vdma%d.pitch:         0x%08x\n", 1, saa7146_read(dev, PITCH1));
	printk("vdma%d.num_line_byte: 0x%08x\n", 1, saa7146_read(dev, NUM_LINE_BYTE1));
	printk("vdma%d => vptr:       0x%08x\n", 1, saa7146_read(dev, PCI_VDP1));
	*/

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}