udlfb.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906
  1. /*
  2. * udlfb.c -- Framebuffer driver for DisplayLink USB controller
  3. *
  4. * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
  5. * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
  6. * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
  7. *
  8. * This file is subject to the terms and conditions of the GNU General Public
  9. * License v2. See the file COPYING in the main directory of this archive for
  10. * more details.
  11. *
  12. * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
  13. * usb-skeleton by GregKH.
  14. *
  15. * Device-specific portions based on information from Displaylink, with work
  16. * from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
  17. */
  18. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19. #include <linux/module.h>
  20. #include <linux/kernel.h>
  21. #include <linux/init.h>
  22. #include <linux/usb.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/mm.h>
  25. #include <linux/fb.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/slab.h>
  28. #include <linux/prefetch.h>
  29. #include <linux/delay.h>
  30. #include <linux/prefetch.h>
  31. #include <video/udlfb.h>
  32. #include "edid.h"
/*
 * Template for the fixed (non-settable) framebuffer attributes reported to
 * userspace. Per-device fields (smem_start, smem_len, line_length, ...) are
 * filled in at probe time; only values common to all DisplayLink devices
 * live here.
 */
static struct fb_fix_screeninfo dlfb_fix = {
	.id =           "udlfb",
	.type =         FB_TYPE_PACKED_PIXELS,
	.visual =       FB_VISUAL_TRUECOLOR,
	.xpanstep =     0,	/* no hardware panning/wrapping */
	.ypanstep =     0,
	.ywrapstep =    0,
	.accel =        FB_ACCEL_NONE,	/* no fbdev-visible accelerator */
};
/*
 * Capabilities advertised in fb_info->flags: the framebuffer lives in
 * system (virtual) memory and is fast to read; the blit/fill/copy hooks
 * below add USB damage tracking on top of the sys_* helpers, and
 * set_par must always run even if the mode looks unchanged.
 */
static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
		FBINFO_VIRTFB |
		FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
		FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
/*
 * There are many DisplayLink-based graphics products, all with unique PIDs.
 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
 * We also require a match on SubClass (0x00) and Protocol (0x00),
 * which is compatible with all known USB 2.0 era graphics chips and firmware,
 * but allows DisplayLink to increment those for any future incompatible chips
 */
static struct usb_device_id id_table[] = {
	{.idVendor = 0x17e9,	/* DisplayLink vendor ID */
	 .bInterfaceClass = 0xff,	/* vendor-defined class */
	 .bInterfaceSubClass = 0x00,
	 .bInterfaceProtocol = 0x00,
	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
		USB_DEVICE_ID_MATCH_INT_CLASS |
		USB_DEVICE_ID_MATCH_INT_SUBCLASS |
		USB_DEVICE_ID_MATCH_INT_PROTOCOL,
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* module options (both default to 0/off) */
static int console;	/* Optionally allow fbcon to consume first framebuffer */
static int fb_defio;	/* Optionally enable experimental fb_defio mmap support */
/*
 * dlfb keeps a list of urbs for efficient bulk transfers.
 * Forward declarations for the urb pool helpers defined later in this file.
 */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dev);
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len);
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size);
static void dlfb_free_urb_list(struct dlfb_data *dev);
  75. /*
  76. * All DisplayLink bulk operations start with 0xAF, followed by specific code
  77. * All operations are written to buffers which then later get sent to device
  78. */
  79. static char *dlfb_set_register(char *buf, u8 reg, u8 val)
  80. {
  81. *buf++ = 0xAF;
  82. *buf++ = 0x20;
  83. *buf++ = reg;
  84. *buf++ = val;
  85. return buf;
  86. }
/* Begin a video-register update sequence (register 0xFF = 0x00 locks). */
static char *dlfb_vidreg_lock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0x00);
}
/* End a video-register update sequence; 0xFF = 0xFF commits/unlocks. */
static char *dlfb_vidreg_unlock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0xFF);
}
  95. /*
  96. * On/Off for driving the DisplayLink framebuffer to the display
  97. * 0x00 H and V sync on
  98. * 0x01 H and V sync off (screen blank but powered)
  99. * 0x07 DPMS powerdown (requires modeset to come back)
  100. */
  101. static char *dlfb_enable_hvsync(char *buf, bool enable)
  102. {
  103. if (enable)
  104. return dlfb_set_register(buf, 0x1F, 0x00);
  105. else
  106. return dlfb_set_register(buf, 0x1F, 0x07);
  107. }
/* Select the device color depth mode (register 0x00); 0x00 = 16bpp. */
static char *dlfb_set_color_depth(char *buf, u8 selection)
{
	return dlfb_set_register(buf, 0x00, selection);
}
/*
 * Set the device base address of the 16bpp framebuffer segment.
 * The address is written big-endian as three bytes across registers
 * 0x20 (high), 0x21 (mid), and 0x22 (low).
 */
static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
	return dlfb_set_register(wrptr, 0x22, base);
}
/*
 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
 * In 24bpp modes, the low-order RGB bits go in the 8bpp framebuffer
 * (original comment read "low 323 RGB bits" — presumably a typo; the
 * exact split per channel is not visible here, so confirm against the
 * DisplayLink protocol docs). Address is three bytes, regs 0x26-0x28.
 */
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
	return dlfb_set_register(wrptr, 0x28, base);
}
/* Write a 16-bit value big-endian across two consecutive registers. */
static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value >> 8);
	return dlfb_set_register(wrptr, reg+1, value);
}
/*
 * This is kind of weird because the controller takes some
 * register values in a different byte order than other registers.
 * Low byte goes to reg, high byte to reg+1 (opposite of
 * dlfb_set_register_16 above).
 */
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value);
	return dlfb_set_register(wrptr, reg+1, value >> 8);
}
  143. /*
  144. * LFSR is linear feedback shift register. The reason we have this is
  145. * because the display controller needs to minimize the clock depth of
  146. * various counters used in the display path. So this code reverses the
  147. * provided value into the lfsr16 value by counting backwards to get
  148. * the value that needs to be set in the hardware comparator to get the
  149. * same actual count. This makes sense once you read above a couple of
  150. * times and think about it from a hardware perspective.
  151. */
  152. static u16 dlfb_lfsr16(u16 actual_count)
  153. {
  154. u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
  155. while (actual_count--) {
  156. lv = ((lv << 1) |
  157. (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
  158. & 0xFFFF;
  159. }
  160. return (u16) lv;
  161. }
/*
 * This does LFSR conversion on the value that is to be written.
 * See LFSR explanation above for more detail.
 */
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
}
/*
 * This takes a standard fbdev screeninfo struct and all of its monitor mode
 * details and converts them into the DisplayLink equivalent register commands.
 * Counter-style registers go through LFSR conversion; pixel counts are plain
 * 16-bit; the pixel clock register is byte-swapped (see 16be helper).
 * Returns the advanced write pointer.
 */
static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
{
	u16 xds, yds;
	u16 xde, yde;
	u16 yec;

	/* x display start */
	xds = var->left_margin + var->hsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
	/* x display end */
	xde = xds + var->xres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);

	/* y display start */
	yds = var->upper_margin + var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
	/* y display end */
	yde = yds + var->yres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);

	/* x end count is active + blanking - 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
			xde + var->right_margin - 1);

	/* libdlo hardcodes hsync start to 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);

	/* hsync end is width of sync pulse + 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);

	/* hpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);

	/* yendcount is vertical active + vertical blanking */
	yec = var->yres + var->upper_margin + var->lower_margin +
			var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);

	/* libdlo hardcodes vsync start to 0 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);

	/* vsync end is width of vsync pulse */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);

	/* vpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);

	/* convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k */
	wrptr = dlfb_set_register_16be(wrptr, 0x1B,
			200*1000*1000/var->pixclock);

	return wrptr;
}
/*
 * This takes a standard fbdev screeninfo struct that was fetched or prepared
 * and then generates the appropriate command sequence that then drives the
 * display controller.
 * Returns 0 on success, -EPERM if the device is not active, -ENOMEM if no
 * urb is available, or the dlfb_submit_urb() result.
 */
static int dlfb_set_video_mode(struct dlfb_data *dev,
				struct fb_var_screeninfo *var)
{
	char *buf;
	char *wrptr;
	int retval = 0;
	int writesize;
	struct urb *urb;

	if (!atomic_read(&dev->usb_active))
		return -EPERM;

	/* urb ownership passes to dlfb_submit_urb below */
	urb = dlfb_get_urb(dev);
	if (!urb)
		return -ENOMEM;

	buf = (char *) urb->transfer_buffer;

	/*
	 * This first section has to do with setting the base address on the
	 * controller associated with the display. There are 2 base
	 * pointers, currently, we only use the 16 bpp segment.
	 */
	wrptr = dlfb_vidreg_lock(buf);
	wrptr = dlfb_set_color_depth(wrptr, 0x00);
	/* set base for 16bpp segment to 0 */
	wrptr = dlfb_set_base16bpp(wrptr, 0);
	/* set base for 8bpp segment to end of fb */
	wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);

	wrptr = dlfb_set_vid_cmds(wrptr, var);
	wrptr = dlfb_enable_hvsync(wrptr, true);
	wrptr = dlfb_vidreg_unlock(wrptr);

	writesize = wrptr - buf;

	retval = dlfb_submit_urb(dev, urb, writesize);

	return retval;
}
  252. static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
  253. {
  254. unsigned long start = vma->vm_start;
  255. unsigned long size = vma->vm_end - vma->vm_start;
  256. unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
  257. unsigned long page, pos;
  258. if (offset + size > info->fix.smem_len)
  259. return -EINVAL;
  260. pos = (unsigned long)info->fix.smem_start + offset;
  261. pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
  262. pos, size);
  263. while (size > 0) {
  264. page = vmalloc_to_pfn((void *)pos);
  265. if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
  266. return -EAGAIN;
  267. start += PAGE_SIZE;
  268. pos += PAGE_SIZE;
  269. if (size > PAGE_SIZE)
  270. size -= PAGE_SIZE;
  271. else
  272. size = 0;
  273. }
  274. vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
  275. return 0;
  276. }
/*
 * Trims identical data from front and back of line
 * Sets new front buffer address and width
 * And returns byte count of identical pixels
 * Assumes CPU natural alignment (unsigned long)
 * for back and front buffer ptrs and width
 */
static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
	int j, k;
	/* compare word-at-a-time for speed; alignment assumed per above */
	const unsigned long *back = (const unsigned long *) bback;
	const unsigned long *front = (const unsigned long *) *bfront;
	const int width = *width_bytes / sizeof(unsigned long);
	int identical = width;
	int start = width;	/* defaults mean "entire line identical" */
	int end = width;

	prefetch((void *) front);
	prefetch((void *) back);

	/* scan forward for the first differing word */
	for (j = 0; j < width; j++) {
		if (back[j] != front[j]) {
			start = j;
			break;
		}
	}

	/*
	 * Scan backward for the last differing word. Note this reuses j
	 * from the loop above as the lower bound, so the two scans never
	 * overlap; if the line was fully identical, j == width and this
	 * loop doesn't run, leaving start == end == width (width_bytes
	 * becomes 0 below).
	 */
	for (k = width - 1; k > j; k--) {
		if (back[k] != front[k]) {
			end = k+1;
			break;
		}
	}

	identical = start + (width - end);
	*bfront = (u8 *) &front[start];
	*width_bytes = (end - start) * sizeof(unsigned long);

	return identical * sizeof(unsigned long);
}
/*
 * Render a command stream for an encoded horizontal line segment of pixels.
 *
 * A command buffer holds several commands.
 * It always begins with a fresh command header
 * (the protocol doesn't require this, but we enforce it to allow
 * multiple buffers to be potentially encoded and sent in parallel).
 * A single command encodes one contiguous horizontal line of pixels
 *
 * The function relies on the client to do all allocation, so that
 * rendering can be done directly to output buffers (e.g. USB URBs).
 * The function fills the supplied command buffer, providing information
 * on where it left off, so the client may call in again with additional
 * buffers if the line will take several buffers to complete.
 *
 * A single command can transmit a maximum of 256 pixels,
 * regardless of the compression ratio (protocol design limit).
 * To the hardware, 0 for a size byte means 256
 *
 * Rather than 256 pixel commands which are either rl or raw encoded,
 * the rlx command simply assumes alternating raw and rl spans within one cmd.
 * This has a slightly larger header overhead, but produces more even results.
 * It also processes all data (read and write) in a single pass.
 * Performance benchmarks of common cases show it having just slightly better
 * compression than 256 pixel raw or rle commands, with similar CPU consumption.
 * But for very rl friendly data, will compress not quite as well.
 */
static void dlfb_compress_hline(
	const uint16_t **pixel_start_ptr,
	const uint16_t *const pixel_end,
	uint32_t *device_address_ptr,
	uint8_t **command_buffer_ptr,
	const uint8_t *const cmd_buffer_end)
{
	const uint16_t *pixel = *pixel_start_ptr;
	uint32_t dev_addr = *device_address_ptr;
	uint8_t *cmd = *command_buffer_ptr;
	const int bpp = 2;	/* device framebuffer is 16bpp */

	/* emit commands while pixels remain and a full header still fits */
	while ((pixel_end > pixel) &&
	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
		uint8_t *raw_pixels_count_byte = 0;
		uint8_t *cmd_pixels_count_byte = 0;
		const uint16_t *raw_pixel_start = 0;
		const uint16_t *cmd_pixel_start, *cmd_pixel_end = 0;

		prefetchw((void *) cmd); /* pull in one cache line at least */

		/* rlx command header: 0xAF 0x6B + 24-bit device address */
		*cmd++ = 0xAF;
		*cmd++ = 0x6B;
		*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr) & 0xFF);

		cmd_pixels_count_byte = cmd++; /* we'll know this later */
		cmd_pixel_start = pixel;

		raw_pixels_count_byte = cmd++; /* we'll know this later */
		raw_pixel_start = pixel;

		/* bound this command by the 256-pixel protocol limit, the
		 * pixels remaining, and the output space remaining */
		cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
			min((int)(pixel_end - pixel),
			    (int)(cmd_buffer_end - cmd) / bpp));

		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);

		while (pixel < cmd_pixel_end) {
			const uint16_t * const repeating_pixel = pixel;

			/* copy pixel raw (device is big-endian 16bpp) */
			*(uint16_t *)cmd = cpu_to_be16p(pixel);
			cmd += 2;
			pixel++;

			if (unlikely((pixel < cmd_pixel_end) &&
				     (*pixel == *repeating_pixel))) {
				/* go back and fill in raw pixel count */
				*raw_pixels_count_byte = ((repeating_pixel -
						raw_pixel_start) + 1) & 0xFF;

				/* consume the run of equal pixels */
				while ((pixel < cmd_pixel_end)
				       && (*pixel == *repeating_pixel)) {
					pixel++;
				}

				/* immediately after raw data is repeat byte */
				*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;

				/* Then start another raw pixel span */
				raw_pixel_start = pixel;
				raw_pixels_count_byte = cmd++;
			}
		}

		if (pixel > raw_pixel_start) {
			/* finalize last RAW span */
			*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
		}

		*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
		dev_addr += (pixel - cmd_pixel_start) * bpp;
	}

	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
		/* Fill leftover bytes with no-ops */
		if (cmd_buffer_end > cmd)
			memset(cmd, 0xAF, cmd_buffer_end - cmd);
		cmd = (uint8_t *) cmd_buffer_end;
	}

	/* report progress back to the caller */
	*command_buffer_ptr = cmd;
	*pixel_start_ptr = pixel;
	*device_address_ptr = dev_addr;

	return;
}
/*
 * There are 3 copies of every pixel: The front buffer that the fbdev
 * client renders to, the actual framebuffer across the USB bus in hardware
 * (that we can only write to, slowly, and can never read), and (optionally)
 * our shadow copy that tracks what's been sent to that hardware buffer.
 *
 * Encodes one horizontal span into urb command buffers, submitting full
 * urbs and fetching fresh ones as needed (updating *urb_ptr/*urb_buf_ptr).
 * Accumulates identical-byte and sent-byte counts into *ident_ptr and
 * *sent_ptr. Returns 0 on success, 1 on urb failure (lost_pixels is set
 * by the urb machinery).
 */
static int dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr,
			     const char *front, char **urb_buf_ptr,
			     u32 byte_offset, u32 byte_width,
			     int *ident_ptr, int *sent_ptr)
{
	const u8 *line_start, *line_end, *next_pixel;
	u32 dev_addr = dev->base16 + byte_offset;
	struct urb *urb = *urb_ptr;
	u8 *cmd = *urb_buf_ptr;
	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;

	line_start = (u8 *) (front + byte_offset);
	next_pixel = line_start;
	line_end = next_pixel + byte_width;

	if (dev->backing_buffer) {
		int offset;
		const u8 *back_start = (u8 *) (dev->backing_buffer
						+ byte_offset);

		/* skip leading/trailing pixels already on the device */
		*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
			&byte_width);

		offset = next_pixel - line_start;
		line_end = next_pixel + byte_width;
		dev_addr += offset;
		back_start += offset;
		line_start += offset;

		/* update shadow copy to match what we're about to send */
		memcpy((char *)back_start, (char *) line_start,
		       byte_width);
	}

	while (next_pixel < line_end) {

		dlfb_compress_hline((const uint16_t **) &next_pixel,
			     (const uint16_t *) line_end, &dev_addr,
			(u8 **) &cmd, (u8 *) cmd_end);

		if (cmd >= cmd_end) {
			/* this urb is full; send it and grab another */
			int len = cmd - (u8 *) urb->transfer_buffer;
			if (dlfb_submit_urb(dev, urb, len))
				return 1; /* lost pixels is set */
			*sent_ptr += len;
			urb = dlfb_get_urb(dev);
			if (!urb)
				return 1; /* lost_pixels is set */
			*urb_ptr = urb;
			cmd = urb->transfer_buffer;
			cmd_end = &cmd[urb->transfer_buffer_length];
		}
	}

	*urb_buf_ptr = cmd;

	return 0;
}
  462. int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
  463. int width, int height, char *data)
  464. {
  465. int i, ret;
  466. char *cmd;
  467. cycles_t start_cycles, end_cycles;
  468. int bytes_sent = 0;
  469. int bytes_identical = 0;
  470. struct urb *urb;
  471. int aligned_x;
  472. start_cycles = get_cycles();
  473. aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
  474. width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
  475. x = aligned_x;
  476. if ((width <= 0) ||
  477. (x + width > dev->info->var.xres) ||
  478. (y + height > dev->info->var.yres))
  479. return -EINVAL;
  480. if (!atomic_read(&dev->usb_active))
  481. return 0;
  482. urb = dlfb_get_urb(dev);
  483. if (!urb)
  484. return 0;
  485. cmd = urb->transfer_buffer;
  486. for (i = y; i < y + height ; i++) {
  487. const int line_offset = dev->info->fix.line_length * i;
  488. const int byte_offset = line_offset + (x * BPP);
  489. if (dlfb_render_hline(dev, &urb,
  490. (char *) dev->info->fix.smem_start,
  491. &cmd, byte_offset, width * BPP,
  492. &bytes_identical, &bytes_sent))
  493. goto error;
  494. }
  495. if (cmd > (char *) urb->transfer_buffer) {
  496. /* Send partial buffer remaining before exiting */
  497. int len = cmd - (char *) urb->transfer_buffer;
  498. ret = dlfb_submit_urb(dev, urb, len);
  499. bytes_sent += len;
  500. } else
  501. dlfb_urb_completion(urb);
  502. error:
  503. atomic_add(bytes_sent, &dev->bytes_sent);
  504. atomic_add(bytes_identical, &dev->bytes_identical);
  505. atomic_add(width*height*2, &dev->bytes_rendered);
  506. end_cycles = get_cycles();
  507. atomic_add(((unsigned int) ((end_cycles - start_cycles)
  508. >> 10)), /* Kcycles */
  509. &dev->cpu_kcycles_used);
  510. return 0;
  511. }
/*
 * Path triggered by usermode clients who write to filesystem
 * e.g. cat filename > /dev/fb1
 * Not used by X Windows or text-mode console. But useful for testing.
 * Slow because of extra copy and we must assume all pixels dirty.
 */
static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	ssize_t result;
	struct dlfb_data *dev = info->par;
	u32 offset = (u32) *ppos;

	/* copy into the system framebuffer first */
	result = fb_sys_write(info, buf, count, ppos);

	if (result > 0) {
		/* start one line early and pad one line late so writes
		 * landing mid-line are fully covered by the damage rect */
		int start = max((int)(offset / info->fix.line_length) - 1, 0);
		int lines = min((u32)((result / info->fix.line_length) + 1),
				(u32)info->var.yres);

		dlfb_handle_damage(dev, 0, start, info->var.xres,
			lines, info->screen_base);
	}

	return result;
}
/* hardware has native COPY command (see libdlo), but not worth it for fbcon */
static void dlfb_ops_copyarea(struct fb_info *info,
			      const struct fb_copyarea *area)
{
	struct dlfb_data *dev = info->par;

	/* do the copy in system memory, then push the dirty rect over USB */
	sys_copyarea(info, area);

	dlfb_handle_damage(dev, area->dx, area->dy,
			   area->width, area->height, info->screen_base);
}
/* Blit into the system framebuffer, then send the touched rect to the device. */
static void dlfb_ops_imageblit(struct fb_info *info,
			       const struct fb_image *image)
{
	struct dlfb_data *dev = info->par;

	sys_imageblit(info, image);

	dlfb_handle_damage(dev, image->dx, image->dy,
			   image->width, image->height, info->screen_base);
}
/* Fill in the system framebuffer, then send the touched rect to the device. */
static void dlfb_ops_fillrect(struct fb_info *info,
			      const struct fb_fillrect *rect)
{
	struct dlfb_data *dev = info->par;

	sys_fillrect(info, rect);

	dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width,
			   rect->height, info->screen_base);
}
/*
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 * Touching ANY framebuffer memory that triggers a page fault
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex.
 *
 * Deferred-io callback: pushes every dirtied page (PAGE_SIZE span of the
 * framebuffer) to the device, then updates the performance counters.
 */
static void dlfb_dpy_deferred_io(struct fb_info *info,
				 struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct dlfb_data *dev = info->par;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	if (!fb_defio)		/* module option disabled */
		return;

	if (!atomic_read(&dev->usb_active))
		return;

	start_cycles = get_cycles();

	urb = dlfb_get_urb(dev);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		if (dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start,
				      &cmd, cur->index << PAGE_SHIFT,
				      PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		/* submit result ignored: failure flags lost_pixels */
		dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);	/* nothing queued; recycle */

error:
	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(bytes_rendered, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);
}
  610. static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
  611. {
  612. int i;
  613. int ret;
  614. char *rbuf;
  615. rbuf = kmalloc(2, GFP_KERNEL);
  616. if (!rbuf)
  617. return 0;
  618. for (i = 0; i < len; i++) {
  619. ret = usb_control_msg(dev->udev,
  620. usb_rcvctrlpipe(dev->udev, 0), (0x02),
  621. (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
  622. HZ);
  623. if (ret < 1) {
  624. pr_err("Read EDID byte %d failed err %x\n", i, ret);
  625. i--;
  626. break;
  627. }
  628. edid[i] = rbuf[1];
  629. }
  630. kfree(rbuf);
  631. return i;
  632. }
  633. static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
  634. unsigned long arg)
  635. {
  636. struct dlfb_data *dev = info->par;
  637. struct dloarea *area = NULL;
  638. if (!atomic_read(&dev->usb_active))
  639. return 0;
  640. /* TODO: Update X server to get this from sysfs instead */
  641. if (cmd == DLFB_IOCTL_RETURN_EDID) {
  642. char *edid = (char *)arg;
  643. if (copy_to_user(edid, dev->edid, dev->edid_size))
  644. return -EFAULT;
  645. return 0;
  646. }
  647. /* TODO: Help propose a standard fb.h ioctl to report mmap damage */
  648. if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
  649. /*
  650. * If we have a damage-aware client, turn fb_defio "off"
  651. * To avoid perf imact of unnecessary page fault handling.
  652. * Done by resetting the delay for this fb_info to a very
  653. * long period. Pages will become writable and stay that way.
  654. * Reset to normal value when all clients have closed this fb.
  655. */
  656. if (info->fbdefio)
  657. info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE;
  658. area = (struct dloarea *)arg;
  659. if (area->x < 0)
  660. area->x = 0;
  661. if (area->x > info->var.xres)
  662. area->x = info->var.xres;
  663. if (area->y < 0)
  664. area->y = 0;
  665. if (area->y > info->var.yres)
  666. area->y = info->var.yres;
  667. dlfb_handle_damage(dev, area->x, area->y, area->w, area->h,
  668. info->screen_base);
  669. }
  670. return 0;
  671. }
  672. /* taken from vesafb */
  673. static int
  674. dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
  675. unsigned blue, unsigned transp, struct fb_info *info)
  676. {
  677. int err = 0;
  678. if (regno >= info->cmap.len)
  679. return 1;
  680. if (regno < 16) {
  681. if (info->var.red.offset == 10) {
  682. /* 1:5:5:5 */
  683. ((u32 *) (info->pseudo_palette))[regno] =
  684. ((red & 0xf800) >> 1) |
  685. ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
  686. } else {
  687. /* 0:5:6:5 */
  688. ((u32 *) (info->pseudo_palette))[regno] =
  689. ((red & 0xf800)) |
  690. ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
  691. }
  692. }
  693. return err;
  694. }
  695. /*
  696. * It's common for several clients to have framebuffer open simultaneously.
  697. * e.g. both fbcon and X. Makes things interesting.
  698. * Assumes caller is holding info->lock (for open and release at least)
  699. */
  700. static int dlfb_ops_open(struct fb_info *info, int user)
  701. {
  702. struct dlfb_data *dev = info->par;
  703. /*
  704. * fbcon aggressively connects to first framebuffer it finds,
  705. * preventing other clients (X) from working properly. Usually
  706. * not what the user wants. Fail by default with option to enable.
  707. */
  708. if ((user == 0) & (!console))
  709. return -EBUSY;
  710. /* If the USB device is gone, we don't accept new opens */
  711. if (dev->virtualized)
  712. return -ENODEV;
  713. dev->fb_count++;
  714. kref_get(&dev->kref);
  715. if (fb_defio && (info->fbdefio == NULL)) {
  716. /* enable defio at last moment if not disabled by client */
  717. struct fb_deferred_io *fbdefio;
  718. fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
  719. if (fbdefio) {
  720. fbdefio->delay = DL_DEFIO_WRITE_DELAY;
  721. fbdefio->deferred_io = dlfb_dpy_deferred_io;
  722. }
  723. info->fbdefio = fbdefio;
  724. fb_deferred_io_init(info);
  725. }
  726. pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
  727. info->node, user, info, dev->fb_count);
  728. return 0;
  729. }
  730. /*
  731. * Called when all client interfaces to start transactions have been disabled,
  732. * and all references to our device instance (dlfb_data) are released.
  733. * Every transaction must have a reference, so we know are fully spun down
  734. */
  735. static void dlfb_free(struct kref *kref)
  736. {
  737. struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);
  738. /* this function will wait for all in-flight urbs to complete */
  739. if (dev->urbs.count > 0)
  740. dlfb_free_urb_list(dev);
  741. if (dev->backing_buffer)
  742. vfree(dev->backing_buffer);
  743. kfree(dev->edid);
  744. pr_warn("freeing dlfb_data %p\n", dev);
  745. kfree(dev);
  746. }
  747. static void dlfb_release_urb_work(struct work_struct *work)
  748. {
  749. struct urb_node *unode = container_of(work, struct urb_node,
  750. release_urb_work.work);
  751. up(&unode->dev->urbs.limit_sem);
  752. }
  753. static void dlfb_free_framebuffer_work(struct work_struct *work)
  754. {
  755. struct dlfb_data *dev = container_of(work, struct dlfb_data,
  756. free_framebuffer_work.work);
  757. struct fb_info *info = dev->info;
  758. int node = info->node;
  759. unregister_framebuffer(info);
  760. if (info->cmap.len != 0)
  761. fb_dealloc_cmap(&info->cmap);
  762. if (info->monspecs.modedb)
  763. fb_destroy_modedb(info->monspecs.modedb);
  764. if (info->screen_base)
  765. vfree(info->screen_base);
  766. fb_destroy_modelist(&info->modelist);
  767. dev->info = 0;
  768. /* Assume info structure is freed after this point */
  769. framebuffer_release(info);
  770. pr_warn("fb_info for /dev/fb%d has been freed\n", node);
  771. /* ref taken in probe() as part of registering framebfufer */
  772. kref_put(&dev->kref, dlfb_free);
  773. }
/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int dlfb_ops_release(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	dev->fb_count--;

	/* We can't free fb_info here - fbmem will touch it when we return */
	if (dev->virtualized && (dev->fb_count == 0))
		schedule_delayed_work(&dev->free_framebuffer_work, HZ);

	/* last client gone: tear down deferred I/O and restore plain mmap */
	if ((dev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = dlfb_ops_mmap;
	}

	pr_warn("released /dev/fb%d user=%d count=%d\n",
		info->node, user, dev->fb_count);

	/* drop the reference taken by the matching dlfb_ops_open() */
	kref_put(&dev->kref, dlfb_free);

	return 0;
}
  795. /*
  796. * Check whether a video mode is supported by the DisplayLink chip
  797. * We start from monitor's modes, so don't need to filter that here
  798. */
  799. static int dlfb_is_valid_mode(struct fb_videomode *mode,
  800. struct fb_info *info)
  801. {
  802. struct dlfb_data *dev = info->par;
  803. if (mode->xres * mode->yres > dev->sku_pixel_limit) {
  804. pr_warn("%dx%d beyond chip capabilities\n",
  805. mode->xres, mode->yres);
  806. return 0;
  807. }
  808. pr_info("%dx%d valid mode\n", mode->xres, mode->yres);
  809. return 1;
  810. }
  811. static void dlfb_var_color_format(struct fb_var_screeninfo *var)
  812. {
  813. const struct fb_bitfield red = { 11, 5, 0 };
  814. const struct fb_bitfield green = { 5, 6, 0 };
  815. const struct fb_bitfield blue = { 0, 5, 0 };
  816. var->bits_per_pixel = 16;
  817. var->red = red;
  818. var->green = green;
  819. var->blue = blue;
  820. }
  821. static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
  822. struct fb_info *info)
  823. {
  824. struct fb_videomode mode;
  825. /* TODO: support dynamically changing framebuffer size */
  826. if ((var->xres * var->yres * 2) > info->fix.smem_len)
  827. return -EINVAL;
  828. /* set device-specific elements of var unrelated to mode */
  829. dlfb_var_color_format(var);
  830. fb_var_to_videomode(&mode, var);
  831. if (!dlfb_is_valid_mode(&mode, info))
  832. return -EINVAL;
  833. return 0;
  834. }
  835. static int dlfb_ops_set_par(struct fb_info *info)
  836. {
  837. struct dlfb_data *dev = info->par;
  838. int result;
  839. u16 *pix_framebuffer;
  840. int i;
  841. pr_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres);
  842. result = dlfb_set_video_mode(dev, &info->var);
  843. if ((result == 0) && (dev->fb_count == 0)) {
  844. /* paint greenscreen */
  845. pix_framebuffer = (u16 *) info->screen_base;
  846. for (i = 0; i < info->fix.smem_len / 2; i++)
  847. pix_framebuffer[i] = 0x37e6;
  848. dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres,
  849. info->screen_base);
  850. }
  851. return result;
  852. }
  853. /*
  854. * In order to come back from full DPMS off, we need to set the mode again
  855. */
  856. static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
  857. {
  858. struct dlfb_data *dev = info->par;
  859. if (blank_mode != FB_BLANK_UNBLANK) {
  860. char *bufptr;
  861. struct urb *urb;
  862. urb = dlfb_get_urb(dev);
  863. if (!urb)
  864. return 0;
  865. bufptr = (char *) urb->transfer_buffer;
  866. bufptr = dlfb_vidreg_lock(bufptr);
  867. bufptr = dlfb_enable_hvsync(bufptr, false);
  868. bufptr = dlfb_vidreg_unlock(bufptr);
  869. dlfb_submit_urb(dev, urb, bufptr -
  870. (char *) urb->transfer_buffer);
  871. } else {
  872. dlfb_set_video_mode(dev, &info->var);
  873. }
  874. return 0;
  875. }
/* fbdev entry points; clients reach this driver only through these hooks */
static struct fb_ops dlfb_ops = {
	.owner = THIS_MODULE,
	.fb_read = fb_sys_read,
	.fb_write = dlfb_ops_write,
	.fb_setcolreg = dlfb_ops_setcolreg,
	.fb_fillrect = dlfb_ops_fillrect,
	.fb_copyarea = dlfb_ops_copyarea,
	.fb_imageblit = dlfb_ops_imageblit,
	.fb_mmap = dlfb_ops_mmap,
	.fb_ioctl = dlfb_ops_ioctl,
	.fb_open = dlfb_ops_open,
	.fb_release = dlfb_ops_release,
	.fb_blank = dlfb_ops_blank,
	.fb_check_var = dlfb_ops_check_var,
	.fb_set_par = dlfb_ops_set_par,
};
  892. /*
  893. * Assumes &info->lock held by caller
  894. * Assumes no active clients have framebuffer open
  895. */
  896. static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
  897. {
  898. int retval = -ENOMEM;
  899. int old_len = info->fix.smem_len;
  900. int new_len;
  901. unsigned char *old_fb = info->screen_base;
  902. unsigned char *new_fb;
  903. unsigned char *new_back;
  904. pr_warn("Reallocating framebuffer. Addresses will change!\n");
  905. new_len = info->fix.line_length * info->var.yres;
  906. if (PAGE_ALIGN(new_len) > old_len) {
  907. /*
  908. * Alloc system memory for virtual framebuffer
  909. */
  910. new_fb = vmalloc(new_len);
  911. if (!new_fb) {
  912. pr_err("Virtual framebuffer alloc failed\n");
  913. goto error;
  914. }
  915. if (info->screen_base) {
  916. memcpy(new_fb, old_fb, old_len);
  917. vfree(info->screen_base);
  918. }
  919. info->screen_base = new_fb;
  920. info->fix.smem_len = PAGE_ALIGN(new_len);
  921. info->fix.smem_start = (unsigned long) new_fb;
  922. info->flags = udlfb_info_flags;
  923. /*
  924. * Second framebuffer copy to mirror the framebuffer state
  925. * on the physical USB device. We can function without this.
  926. * But with imperfect damage info we may send pixels over USB
  927. * that were, in fact, unchanged - wasting limited USB bandwidth
  928. */
  929. new_back = vzalloc(new_len);
  930. if (!new_back)
  931. pr_info("No shadow/backing buffer allocated\n");
  932. else {
  933. if (dev->backing_buffer)
  934. vfree(dev->backing_buffer);
  935. dev->backing_buffer = new_back;
  936. }
  937. }
  938. retval = 0;
  939. error:
  940. return retval;
  941. }
/*
 * 1) Get EDID from hw, or use sw default
 * 2) Parse into various fb_info structs
 * 3) Allocate virtual framebuffer memory to back highest res mode
 *
 * Parses EDID into three places used by various parts of fbdev:
 * fb_var_screeninfo contains the timing of the monitor's preferred mode
 * fb_info.monspecs is full parsed EDID info, including monspecs.modedb
 * fb_info.modelist is a linked list of all monitor & VESA modes which work
 *
 * If EDID is not readable/valid, then modelist is all VESA modes,
 * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
 * Returns 0 if successful
 */
static int dlfb_setup_modes(struct dlfb_data *dev,
			    struct fb_info *info,
			    char *default_edid, size_t default_edid_size)
{
	int i;
	const struct fb_videomode *default_vmode = NULL;
	int result = 0;
	char *edid;
	int tries = 3;

	if (info->dev) /* only use mutex if info has been registered */
		mutex_lock(&info->lock);

	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
	if (!edid) {
		result = -ENOMEM;
		goto error;
	}

	/* start from a clean slate: previous mode data is discarded */
	fb_destroy_modelist(&info->modelist);
	memset(&info->monspecs, 0, sizeof(info->monspecs));

	/*
	 * Try to (re)read EDID from hardware first
	 * EDID data may return, but not parse as valid
	 * Try again a few times, in case of e.g. analog cable noise
	 */
	while (tries--) {

		i = dlfb_get_edid(dev, edid, EDID_LENGTH);

		if (i >= EDID_LENGTH)
			fb_edid_to_monspecs(edid, &info->monspecs);

		if (info->monspecs.modedb_len > 0) {
			/* adopt the fresh blob; ownership of 'edid' moves
			 * to dev->edid and it is NOT freed at error: below */
			dev->edid = edid;
			dev->edid_size = i;
			break;
		}
	}

	/* If that fails, use a previously returned EDID if available */
	if (info->monspecs.modedb_len == 0) {

		pr_err("Unable to get valid EDID from device/display\n");

		if (dev->edid) {
			fb_edid_to_monspecs(dev->edid, &info->monspecs);
			if (info->monspecs.modedb_len > 0)
				pr_err("Using previously queried EDID\n");
		}
	}

	/* If that fails, use the default EDID we were handed */
	if (info->monspecs.modedb_len == 0) {
		if (default_edid_size >= EDID_LENGTH) {
			fb_edid_to_monspecs(default_edid, &info->monspecs);
			if (info->monspecs.modedb_len > 0) {
				memcpy(edid, default_edid, default_edid_size);
				dev->edid = edid;
				dev->edid_size = default_edid_size;
				pr_err("Using default/backup EDID\n");
			}
		}
	}

	/* If we've got modes, let's pick a best default mode */
	if (info->monspecs.modedb_len > 0) {

		for (i = 0; i < info->monspecs.modedb_len; i++) {
			if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
				fb_add_videomode(&info->monspecs.modedb[i],
					&info->modelist);
			else {
				if (i == 0)
					/* if we've removed top/best mode */
					info->monspecs.misc
						&= ~FB_MISC_1ST_DETAIL;
			}
		}

		default_vmode = fb_find_best_display(&info->monspecs,
						     &info->modelist);
	}

	/* If everything else has failed, fall back to safe default mode */
	if (default_vmode == NULL) {

		struct fb_videomode fb_vmode = {0};

		/*
		 * Add the standard VESA modes to our modelist
		 * Since we don't have EDID, there may be modes that
		 * overspec monitor and/or are incorrect aspect ratio, etc.
		 * But at least the user has a chance to choose
		 */
		for (i = 0; i < VESA_MODEDB_SIZE; i++) {
			if (dlfb_is_valid_mode((struct fb_videomode *)
						&vesa_modes[i], info))
				fb_add_videomode(&vesa_modes[i],
						 &info->modelist);
		}

		/*
		 * default to resolution safe for projectors
		 * (since they are most common case without EDID)
		 */
		fb_vmode.xres = 800;
		fb_vmode.yres = 600;
		fb_vmode.refresh = 60;
		default_vmode = fb_find_nearest_mode(&fb_vmode,
						     &info->modelist);
	}

	/* If we have good mode and no active clients*/
	if ((default_vmode != NULL) && (dev->fb_count == 0)) {

		fb_videomode_to_var(&info->var, default_vmode);
		dlfb_var_color_format(&info->var);

		/*
		 * with mode size info, we can now alloc our framebuffer.
		 */
		memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix));
		info->fix.line_length = info->var.xres *
			(info->var.bits_per_pixel / 8);

		result = dlfb_realloc_framebuffer(dev, info);

	} else
		result = -EINVAL;

error:
	/* free the scratch blob only if it was not adopted as dev->edid */
	if (edid && (dev->edid != edid))
		kfree(edid);

	if (info->dev)
		mutex_unlock(&info->lock);

	return result;
}
  1071. static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
  1072. struct device_attribute *a, char *buf) {
  1073. struct fb_info *fb_info = dev_get_drvdata(fbdev);
  1074. struct dlfb_data *dev = fb_info->par;
  1075. return snprintf(buf, PAGE_SIZE, "%u\n",
  1076. atomic_read(&dev->bytes_rendered));
  1077. }
  1078. static ssize_t metrics_bytes_identical_show(struct device *fbdev,
  1079. struct device_attribute *a, char *buf) {
  1080. struct fb_info *fb_info = dev_get_drvdata(fbdev);
  1081. struct dlfb_data *dev = fb_info->par;
  1082. return snprintf(buf, PAGE_SIZE, "%u\n",
  1083. atomic_read(&dev->bytes_identical));
  1084. }
  1085. static ssize_t metrics_bytes_sent_show(struct device *fbdev,
  1086. struct device_attribute *a, char *buf) {
  1087. struct fb_info *fb_info = dev_get_drvdata(fbdev);
  1088. struct dlfb_data *dev = fb_info->par;
  1089. return snprintf(buf, PAGE_SIZE, "%u\n",
  1090. atomic_read(&dev->bytes_sent));
  1091. }
  1092. static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
  1093. struct device_attribute *a, char *buf) {
  1094. struct fb_info *fb_info = dev_get_drvdata(fbdev);
  1095. struct dlfb_data *dev = fb_info->par;
  1096. return snprintf(buf, PAGE_SIZE, "%u\n",
  1097. atomic_read(&dev->cpu_kcycles_used));
  1098. }
  1099. static ssize_t edid_show(
  1100. struct file *filp,
  1101. struct kobject *kobj, struct bin_attribute *a,
  1102. char *buf, loff_t off, size_t count) {
  1103. struct device *fbdev = container_of(kobj, struct device, kobj);
  1104. struct fb_info *fb_info = dev_get_drvdata(fbdev);
  1105. struct dlfb_data *dev = fb_info->par;
  1106. if (dev->edid == NULL)
  1107. return 0;
  1108. if ((off >= dev->edid_size) || (count > dev->edid_size))
  1109. return 0;
  1110. if (off + count > dev->edid_size)
  1111. count = dev->edid_size - off;
  1112. pr_info("sysfs edid copy %p to %p, %d bytes\n",
  1113. dev->edid, buf, (int) count);
  1114. memcpy(buf, dev->edid, count);
  1115. return count;
  1116. }
/*
 * sysfs write of the "edid" binary attribute: treat the written blob as
 * a new default EDID, re-run mode setup with it, and reprogram the video
 * mode if it was adopted.  Returns src_size on success, 0 otherwise.
 */
static ssize_t edid_store(
			struct file *filp,
			struct kobject *kobj, struct bin_attribute *a,
			char *src, loff_t src_off, size_t src_size) {
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;

	/* We only support write of entire EDID at once, no offset*/
	if ((src_size != EDID_LENGTH) || (src_off != 0))
		return 0;

	dlfb_setup_modes(dev, fb_info, src, src_size);

	/* setup_modes adopts the blob only when it parses to usable modes;
	 * comparing against dev->edid detects whether that happened */
	if (dev->edid && (memcmp(src, dev->edid, src_size) == 0)) {
		pr_info("sysfs written EDID is new default\n");
		dlfb_ops_set_par(fb_info);
		return src_size;
	} else
		return 0;
}
  1135. static ssize_t metrics_reset_store(struct device *fbdev,
  1136. struct device_attribute *attr,
  1137. const char *buf, size_t count)
  1138. {
  1139. struct fb_info *fb_info = dev_get_drvdata(fbdev);
  1140. struct dlfb_data *dev = fb_info->par;
  1141. atomic_set(&dev->bytes_rendered, 0);
  1142. atomic_set(&dev->bytes_identical, 0);
  1143. atomic_set(&dev->bytes_sent, 0);
  1144. atomic_set(&dev->cpu_kcycles_used, 0);
  1145. return count;
  1146. }
/* sysfs "edid" binary file: read cached EDID, write a new default */
static struct bin_attribute edid_attr = {
	.attr.name = "edid",
	/* NOTE(review): 0666 is world-writable; confirm that is intended
	 * rather than 0644 with root-only writes */
	.attr.mode = 0666,
	.size = EDID_LENGTH,
	.read = edid_show,
	.write = edid_store
};
/* Per-framebuffer sysfs files: four read-only counters plus a write-only
 * reset hook (see metrics_*_show / metrics_reset_store above) */
static struct device_attribute fb_device_attrs[] = {
	__ATTR_RO(metrics_bytes_rendered),
	__ATTR_RO(metrics_bytes_identical),
	__ATTR_RO(metrics_bytes_sent),
	__ATTR_RO(metrics_cpu_kcycles_used),
	__ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
};
  1161. /*
  1162. * This is necessary before we can communicate with the display controller.
  1163. */
  1164. static int dlfb_select_std_channel(struct dlfb_data *dev)
  1165. {
  1166. int ret;
  1167. u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
  1168. 0x1C, 0x88, 0x5E, 0x15,
  1169. 0x60, 0xFE, 0xC6, 0x97,
  1170. 0x16, 0x3D, 0x47, 0xF2 };
  1171. ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
  1172. NR_USB_REQUEST_CHANNEL,
  1173. (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
  1174. set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
  1175. return ret;
  1176. }
/*
 * Fetch and parse the DisplayLink vendor-specific USB descriptor to learn
 * device limits; currently only key 0x0200 (max pixel area / SKU limit)
 * is consumed, into dev->sku_pixel_limit.
 *
 * Returns true when the driver may proceed (even for an unrecognized
 * descriptor, for now), false only when the scratch buffer allocation
 * fails.  NOTE(review): declared int but returns true/false — bool would
 * express the contract better.
 */
static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev,
					struct usb_device *usbdev)
{
	char *desc;
	char *buf;
	char *desc_end;

	u8 total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		pr_info("vendor descriptor length:%x data:%02x %02x %02x %02x" \
			"%02x %02x %02x %02x %02x %02x %02x\n",
			total_len, desc[0],
			desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
			desc[7], desc[8], desc[9], desc[10]);

		/* fixed 5-byte header: len, type, 2-byte version, payload len */
		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
		    (desc[2] != 0x01) ||   /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		/* walk the key/length/value records to the end of the blob */
		while (desc < desc_end) {
			u8 length;
			u16 key;

			/*
			 * NOTE(review): these loads read u16/u32 values
			 * straight from the byte stream — potentially
			 * unaligned on some architectures — and 'length'
			 * is not checked against desc_end, so a malformed
			 * descriptor could step past the buffer end.
			 * Confirm against the descriptor spec before
			 * trusting this with arbitrary devices.
			 */
			key = *((u16 *) desc);
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				pr_warn("DL chip limited to %d pixel modes\n",
					max_area);
				dev->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	pr_err("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}
/*
 * USB probe: allocate the per-device dlfb_data and its urb pool, read the
 * vendor descriptor and EDID, set an initial mode, then register with
 * fbdev and create the sysfs files.  Two krefs are taken up front — one
 * released by .disconnect, one by the deferred framebuffer teardown.
 */
static int dlfb_usb_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct usb_device *usbdev;
	struct dlfb_data *dev = 0;	/* NOTE(review): NULL preferred for pointers */
	struct fb_info *info = 0;
	int retval = -ENOMEM;
	int i;

	/* usb initialization */

	usbdev = interface_to_usbdev(interface);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		err("dlfb_usb_probe: failed alloc of dev struct\n");
		goto error;
	}

	/* we need to wait for both usb and fbdev to spin down on disconnect */
	kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
	kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */

	dev->udev = usbdev;
	dev->gdev = &usbdev->dev; /* our generic struct device * */
	usb_set_intfdata(interface, dev);

	pr_info("%s %s - serial #%s\n",
		usbdev->manufacturer, usbdev->product, usbdev->serial);
	pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
		usbdev->descriptor.bcdDevice, dev);
	pr_info("console enable=%d\n", console);
	pr_info("fb_defio enable=%d\n", fb_defio);

	dev->sku_pixel_limit = 2048 * 1152; /* default to maximum */

	if (!dlfb_parse_vendor_descriptor(dev, usbdev)) {
		pr_err("firmware not recognized. Assume incompatible device\n");
		goto error;
	}

	if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		retval = -ENOMEM;
		pr_err("dlfb_alloc_urb_list failed\n");
		goto error;
	}

	/* We don't register a new USB class. Our client interface is fbdev */

	/* allocates framebuffer driver structure, not framebuffer memory */
	info = framebuffer_alloc(0, &interface->dev);
	if (!info) {
		retval = -ENOMEM;
		pr_err("framebuffer_alloc failed\n");
		goto error;
	}

	dev->info = info;
	info->par = dev;
	info->pseudo_palette = dev->pseudo_palette;
	info->fbops = &dlfb_ops;

	retval = fb_alloc_cmap(&info->cmap, 256, 0);
	if (retval < 0) {
		pr_err("fb_alloc_cmap failed %x\n", retval);
		goto error;
	}

	INIT_DELAYED_WORK(&dev->free_framebuffer_work,
			  dlfb_free_framebuffer_work);

	INIT_LIST_HEAD(&info->modelist);

	/* reads EDID, builds the mode list, and allocates screen memory */
	retval = dlfb_setup_modes(dev, info, NULL, 0);
	if (retval != 0) {
		pr_err("unable to find common mode for display and adapter\n");
		goto error;
	}

	/* ready to begin using device */

	atomic_set(&dev->usb_active, 1);
	dlfb_select_std_channel(dev);

	dlfb_ops_check_var(&info->var, info);
	dlfb_ops_set_par(info);

	retval = register_framebuffer(info);
	if (retval < 0) {
		pr_err("register_framebuffer failed %d\n", retval);
		goto error;
	}

	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
		retval = device_create_file(info->dev, &fb_device_attrs[i]);
		if (retval) {
			pr_err("device_create_file failed %d\n", retval);
			goto err_del_attrs;
		}
	}

	retval = device_create_bin_file(info->dev, &edid_attr);
	if (retval) {
		pr_err("device_create_bin_file failed %d\n", retval);
		goto err_del_attrs;
	}

	pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
			" Using %dK framebuffer memory\n", info->node,
			info->var.xres, info->var.yres,
			((dev->backing_buffer) ?
			info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
	return 0;

err_del_attrs:
	/* unwind only the sysfs files created so far */
	for (i -= 1; i >= 0; i--)
		device_remove_file(info->dev, &fb_device_attrs[i]);

error:
	if (dev) {

		if (info) {
			if (info->cmap.len != 0)
				fb_dealloc_cmap(&info->cmap);
			if (info->monspecs.modedb)
				fb_destroy_modedb(info->monspecs.modedb);
			if (info->screen_base)
				vfree(info->screen_base);

			fb_destroy_modelist(&info->modelist);

			framebuffer_release(info);
		}

		if (dev->backing_buffer)
			vfree(dev->backing_buffer);

		/* drop both refs taken above; dlfb_free() runs on the last */
		kref_put(&dev->kref, dlfb_free); /* ref for framebuffer */
		kref_put(&dev->kref, dlfb_free); /* last ref from kref_init */

		/* dev has been deallocated. Do not dereference */
	}

	return retval;
}
/*
 * USB disconnect: mark the device virtualized (existing fb clients keep a
 * working virtual framebuffer), stop new USB traffic, remove sysfs files,
 * and schedule/permit the deferred teardown depending on open clients.
 */
static void dlfb_usb_disconnect(struct usb_interface *interface)
{
	struct dlfb_data *dev;
	struct fb_info *info;
	int i;

	dev = usb_get_intfdata(interface);
	info = dev->info;

	pr_info("USB disconnect starting\n");

	/* we virtualize until all fb clients release. Then we free */
	dev->virtualized = true;

	/* When non-active we'll update virtual framebuffer, but no new urbs */
	atomic_set(&dev->usb_active, 0);

	/* remove udlfb's sysfs interfaces */
	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
		device_remove_file(info->dev, &fb_device_attrs[i]);
	device_remove_bin_file(info->dev, &edid_attr);

	usb_set_intfdata(interface, NULL);

	/* if clients still have us open, will be freed on last close */
	if (dev->fb_count == 0)
		schedule_delayed_work(&dev->free_framebuffer_work, 0);

	/* release reference taken by kref_init in probe() */
	kref_put(&dev->kref, dlfb_free);

	/* consider dlfb_data freed */

	return;
}
/* USB glue: device match table plus probe/disconnect entry points */
static struct usb_driver dlfb_driver = {
	.name = "udlfb",
	.probe = dlfb_usb_probe,
	.disconnect = dlfb_usb_disconnect,
	.id_table = id_table,
};
  1379. static int __init dlfb_module_init(void)
  1380. {
  1381. int res;
  1382. res = usb_register(&dlfb_driver);
  1383. if (res)
  1384. err("usb_register failed. Error number %d", res);
  1385. return res;
  1386. }
/* Module exit: unregister from the USB core */
static void __exit dlfb_module_exit(void)
{
	usb_deregister(&dlfb_driver);
}

module_init(dlfb_module_init);
module_exit(dlfb_module_exit);
/*
 * Completion handler for render urbs: log unexpected failures, return the
 * urb to the free list, and release one slot of the counting semaphore so
 * a waiting writer can claim it.
 */
static void dlfb_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct dlfb_data *dev = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)) {
			pr_err("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
			atomic_set(&dev->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */

	spin_lock_irqsave(&dev->urbs.lock, flags);
	list_add_tail(&unode->entry, &dev->urbs.list);
	dev->urbs.available++;
	spin_unlock_irqrestore(&dev->urbs.lock, flags);

	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
		up(&dev->urbs.limit_sem);
}
/*
 * Drain and free every render urb.  Each down() on limit_sem guarantees
 * one urb has completed and is back on the free list, so it can be
 * unlinked and released without racing in-flight completions.
 */
static void dlfb_free_urb_list(struct dlfb_data *dev)
{
	int count = dev->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	pr_notice("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at shutdown*/
		ret = down_interruptible(&dev->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&dev->urbs.lock, flags);

		node = dev->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&dev->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, dev->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
}
  1450. static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
  1451. {
  1452. int i = 0;
  1453. struct urb *urb;
  1454. struct urb_node *unode;
  1455. char *buf;
  1456. spin_lock_init(&dev->urbs.lock);
  1457. dev->urbs.size = size;
  1458. INIT_LIST_HEAD(&dev->urbs.list);
  1459. while (i < count) {
  1460. unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
  1461. if (!unode)
  1462. break;
  1463. unode->dev = dev;
  1464. INIT_DELAYED_WORK(&unode->release_urb_work,
  1465. dlfb_release_urb_work);
  1466. urb = usb_alloc_urb(0, GFP_KERNEL);
  1467. if (!urb) {
  1468. kfree(unode);
  1469. break;
  1470. }
  1471. unode->urb = urb;
  1472. buf = usb_alloc_coherent(dev->udev, MAX_TRANSFER, GFP_KERNEL,
  1473. &urb->transfer_dma);
  1474. if (!buf) {
  1475. kfree(unode);
  1476. usb_free_urb(urb);
  1477. break;
  1478. }
  1479. /* urb->transfer_buffer_length set to actual before submit */
  1480. usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
  1481. buf, size, dlfb_urb_completion, unode);
  1482. urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  1483. list_add_tail(&unode->entry, &dev->urbs.list);
  1484. i++;
  1485. }
  1486. sema_init(&dev->urbs.limit_sem, i);
  1487. dev->urbs.count = i;
  1488. dev->urbs.available = i;
  1489. pr_notice("allocated %d %d byte urbs\n", i, (int) size);
  1490. return i;
  1491. }
/*
 * Claim a free render urb, waiting up to GET_URB_TIMEOUT for one to
 * complete.  Returns NULL (and flags lost_pixels) on timeout/interrupt.
 */
static struct urb *dlfb_get_urb(struct dlfb_data *dev)
{
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&dev->lost_pixels, 1);
		pr_warn("wait for urb interrupted: %x available: %d\n",
		       ret, dev->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&dev->urbs.lock, flags);

	BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
	entry = dev->urbs.list.next;
	list_del_init(entry);
	dev->urbs.available--;

	spin_unlock_irqrestore(&dev->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}
  1518. static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
  1519. {
  1520. int ret;
  1521. BUG_ON(len > dev->urbs.size);
  1522. urb->transfer_buffer_length = len; /* set to actual payload len */
  1523. ret = usb_submit_urb(urb, GFP_KERNEL);
  1524. if (ret) {
  1525. dlfb_urb_completion(urb); /* because no one else will */
  1526. atomic_set(&dev->lost_pixels, 1);
  1527. pr_err("usb_submit_urb error %x\n", ret);
  1528. }
  1529. return ret;
  1530. }
/* Opt-in: allow fbcon to bind the framebuffer (refused by default in
 * dlfb_ops_open so X isn't locked out) */
module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(console, "Allow fbcon to consume first framebuffer found");

/* Deferred-I/O mmap support; marked experimental */
module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support. *Experimental*");

MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
	      "Jaya Kumar <jayakumar.lkml@gmail.com>, "
	      "Bernie Thompson <bernie@plugable.com>");
MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
MODULE_LICENSE("GPL");