mmp_ctrl.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591
  1. /*
  2. * linux/drivers/video/mmp/hw/mmp_ctrl.c
  3. * Marvell MMP series Display Controller support
  4. *
  5. * Copyright (C) 2012 Marvell Technology Group Ltd.
  6. * Authors: Guoqing Li <ligq@marvell.com>
  7. * Lisa Du <cldu@marvell.com>
  8. * Zhou Zhu <zzhu3@marvell.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but WITHOUT
  16. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  18. * more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along with
  21. * this program. If not, see <http://www.gnu.org/licenses/>.
  22. *
  23. */
  24. #include <linux/module.h>
  25. #include <linux/moduleparam.h>
  26. #include <linux/kernel.h>
  27. #include <linux/errno.h>
  28. #include <linux/string.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/slab.h>
  31. #include <linux/delay.h>
  32. #include <linux/platform_device.h>
  33. #include <linux/dma-mapping.h>
  34. #include <linux/clk.h>
  35. #include <linux/err.h>
  36. #include <linux/vmalloc.h>
  37. #include <linux/uaccess.h>
  38. #include <linux/kthread.h>
  39. #include <linux/io.h>
  40. #include "mmp_ctrl.h"
/*
 * Display controller interrupt handler (shared IRQ).
 *
 * Snapshots the pending status (ISR) and enable mask (ENA), acknowledges
 * the asserted bits, and loops until no enabled interrupt remains pending
 * so the line is quiet when we return.
 */
static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
{
	struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
	u32 isr, imask, tmp;

	isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
	imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);

	do {
		/* clear clock only */
		/* NOTE(review): writing ~isr appears to be the controller's
		 * write-to-clear convention for the bits captured in 'isr';
		 * confirm against the SPU_IRQ_ISR register description. */
		tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
		if (tmp & isr)
			writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
		/* non-relaxed readl below also orders the clear before the
		 * re-check of pending-and-enabled bits */
	} while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);

	return IRQ_HANDLED;
}
  55. static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
  56. {
  57. u32 link_config = path_to_path_plat(overlay->path)->link_config;
  58. u32 rbswap, uvswap = 0, yuvswap = 0,
  59. csc_en = 0, val = 0,
  60. vid = overlay_is_vid(overlay);
  61. switch (pix_fmt) {
  62. case PIXFMT_RGB565:
  63. case PIXFMT_RGB1555:
  64. case PIXFMT_RGB888PACK:
  65. case PIXFMT_RGB888UNPACK:
  66. case PIXFMT_RGBA888:
  67. rbswap = !(link_config & 0x1);
  68. break;
  69. case PIXFMT_VYUY:
  70. case PIXFMT_YVU422P:
  71. case PIXFMT_YVU420P:
  72. rbswap = link_config & 0x1;
  73. uvswap = 1;
  74. break;
  75. case PIXFMT_YUYV:
  76. rbswap = link_config & 0x1;
  77. yuvswap = 1;
  78. break;
  79. default:
  80. rbswap = link_config & 0x1;
  81. break;
  82. }
  83. switch (pix_fmt) {
  84. case PIXFMT_RGB565:
  85. case PIXFMT_BGR565:
  86. val = 0;
  87. break;
  88. case PIXFMT_RGB1555:
  89. case PIXFMT_BGR1555:
  90. val = 0x1;
  91. break;
  92. case PIXFMT_RGB888PACK:
  93. case PIXFMT_BGR888PACK:
  94. val = 0x2;
  95. break;
  96. case PIXFMT_RGB888UNPACK:
  97. case PIXFMT_BGR888UNPACK:
  98. val = 0x3;
  99. break;
  100. case PIXFMT_RGBA888:
  101. case PIXFMT_BGRA888:
  102. val = 0x4;
  103. break;
  104. case PIXFMT_UYVY:
  105. case PIXFMT_VYUY:
  106. case PIXFMT_YUYV:
  107. val = 0x5;
  108. csc_en = 1;
  109. break;
  110. case PIXFMT_YUV422P:
  111. case PIXFMT_YVU422P:
  112. val = 0x6;
  113. csc_en = 1;
  114. break;
  115. case PIXFMT_YUV420P:
  116. case PIXFMT_YVU420P:
  117. val = 0x7;
  118. csc_en = 1;
  119. break;
  120. default:
  121. break;
  122. }
  123. return (dma_palette(0) | dma_fmt(vid, val) |
  124. dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
  125. dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
  126. }
  127. static void dmafetch_set_fmt(struct mmp_overlay *overlay)
  128. {
  129. u32 tmp;
  130. struct mmp_path *path = overlay->path;
  131. tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
  132. tmp &= ~dma_mask(overlay_is_vid(overlay));
  133. tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
  134. writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
  135. }
  136. static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
  137. {
  138. struct lcd_regs *regs = path_regs(overlay->path);
  139. u32 pitch;
  140. /* assert win supported */
  141. memcpy(&overlay->win, win, sizeof(struct mmp_win));
  142. mutex_lock(&overlay->access_ok);
  143. pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
  144. writel_relaxed(pitch, &regs->g_pitch);
  145. writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
  146. writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
  147. writel_relaxed(0, &regs->g_start);
  148. dmafetch_set_fmt(overlay);
  149. mutex_unlock(&overlay->access_ok);
  150. }
  151. static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
  152. {
  153. u32 mask = overlay_is_vid(overlay) ? CFG_GRA_ENA_MASK :
  154. CFG_DMA_ENA_MASK;
  155. u32 enable = overlay_is_vid(overlay) ? CFG_GRA_ENA(1) : CFG_DMA_ENA(1);
  156. u32 tmp;
  157. struct mmp_path *path = overlay->path;
  158. mutex_lock(&overlay->access_ok);
  159. tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
  160. tmp &= ~mask;
  161. tmp |= (on ? enable : 0);
  162. writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
  163. mutex_unlock(&overlay->access_ok);
  164. }
  165. static void path_enabledisable(struct mmp_path *path, int on)
  166. {
  167. u32 tmp;
  168. mutex_lock(&path->access_ok);
  169. tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
  170. if (on)
  171. tmp &= ~SCLK_DISABLE;
  172. else
  173. tmp |= SCLK_DISABLE;
  174. writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
  175. mutex_unlock(&path->access_ok);
  176. }
  177. static void path_onoff(struct mmp_path *path, int on)
  178. {
  179. if (path->status == on) {
  180. dev_info(path->dev, "path %s is already %s\n",
  181. path->name, stat_name(path->status));
  182. return;
  183. }
  184. if (on) {
  185. path_enabledisable(path, 1);
  186. if (path->panel && path->panel->set_onoff)
  187. path->panel->set_onoff(path->panel, 1);
  188. } else {
  189. if (path->panel && path->panel->set_onoff)
  190. path->panel->set_onoff(path->panel, 0);
  191. path_enabledisable(path, 0);
  192. }
  193. path->status = on;
  194. }
  195. static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
  196. {
  197. if (overlay->status == on) {
  198. dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
  199. overlay->path->name, stat_name(overlay->status));
  200. return;
  201. }
  202. overlay->status = on;
  203. dmafetch_onoff(overlay, on);
  204. if (overlay->path->ops.check_status(overlay->path)
  205. != overlay->path->status)
  206. path_onoff(overlay->path, on);
  207. }
/* Record which DMA fetch engine this overlay should use. */
static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
{
	overlay->dmafetch_id = fetch_id;
}
  212. static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
  213. {
  214. struct lcd_regs *regs = path_regs(overlay->path);
  215. /* FIXME: assert addr supported */
  216. memcpy(&overlay->addr, addr, sizeof(struct mmp_win));
  217. writel(addr->phys[0], &regs->g_0);
  218. return overlay->addr.phys[0];
  219. }
/*
 * Program the controller timing registers for @mode and configure the
 * pixel clock divider.  @mode is copied into path->mode; all register
 * writes happen under path->access_ok.
 */
static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
{
	struct lcd_regs *regs = path_regs(path);
	u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
		link_config = path_to_path_plat(path)->link_config;

	/* FIXME: assert videomode supported */
	memcpy(&path->mode, mode, sizeof(struct mmp_mode));

	mutex_lock(&path->access_ok);

	/* polarity of timing signals (bit0 preserved from current value) */
	tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
	tmp |= mode->vsync_invert ? 0 : 0x8;
	tmp |= mode->hsync_invert ? 0 : 0x4;
	tmp |= link_config & CFG_DUMBMODE_MASK;
	tmp |= CFG_DUMB_ENA(1);
	writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));

	/* active area and h/v porches: x in low half-word, y in high */
	writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
	writel_relaxed((mode->left_margin << 16) | mode->right_margin,
		&regs->screen_h_porch);
	writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
		&regs->screen_v_porch);
	/* total line/frame length including sync periods */
	total_x = mode->xres + mode->left_margin + mode->right_margin +
		mode->hsync_len;
	total_y = mode->yres + mode->upper_margin + mode->lower_margin +
		mode->vsync_len;
	writel_relaxed((total_y << 16) | total_x, &regs->screen_size);

	/* vsync ctrl */
	if (path->output_type == PATH_OUT_DSI)
		/* NOTE(review): 0x01330133 is an unexplained magic value
		 * for DSI output -- confirm against the datasheet */
		vsync_ctrl = 0x01330133;
	else
		vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
			| (mode->xres + mode->right_margin);
	writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);

	/* set pixclock div: round the divider up so the resulting pixel
	 * clock never exceeds the requested rate */
	sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
	sclk_div = sclk_src / mode->pixclock_freq;
	if (sclk_div * mode->pixclock_freq < sclk_src)
		sclk_div++;
	dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
			__func__, sclk_src, sclk_div, mode->pixclock_freq);
	tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
	tmp &= ~CLK_INT_DIV_MASK;
	tmp |= sclk_div;
	writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

	mutex_unlock(&path->access_ok);
}
/* Overlay operations handed to the mmp display core at path registration. */
static struct mmp_overlay_ops mmphw_overlay_ops = {
	.set_fetch = overlay_set_fetch,
	.set_onoff = overlay_set_onoff,
	.set_win = overlay_set_win,
	.set_addr = overlay_set_addr,
};
  271. static void ctrl_set_default(struct mmphw_ctrl *ctrl)
  272. {
  273. u32 tmp, irq_mask;
  274. /*
  275. * LCD Global control(LCD_TOP_CTRL) should be configed before
  276. * any other LCD registers read/write, or there maybe issues.
  277. */
  278. tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
  279. tmp |= 0xfff0;
  280. writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);
  281. /* disable all interrupts */
  282. irq_mask = path_imasks(0) | err_imask(0) |
  283. path_imasks(1) | err_imask(1);
  284. tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
  285. tmp &= ~irq_mask;
  286. tmp |= irq_mask;
  287. writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
  288. }
/*
 * Apply one-time defaults for a freshly registered path: IOPAD mode,
 * clock source selection, dma_ctrl1 defaults, zeroed base registers,
 * and per-path smoothing / AXI arbiter tuning.
 */
static void path_set_default(struct mmp_path *path)
{
	struct lcd_regs *regs = path_regs(path);
	u32 dma_ctrl1, mask, tmp, path_config;

	path_config = path_to_path_plat(path)->path_config;

	/* Configure IOPAD: should be parallel only */
	if (PATH_OUT_PARALLEL == path->output_type) {
		mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
		tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
		tmp &= ~mask;
		tmp |= path_config;
		writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
	}

	/* Select path clock source (path_config also carries these bits) */
	tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
	tmp &= ~SCLK_SRC_SEL_MASK;
	tmp |= path_config;
	writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

	/*
	 * Configure default bits: vsync triggers DMA,
	 * power save enable, configure alpha registers to
	 * display 100% graphics, and set pixel command.
	 */
	dma_ctrl1 = 0x2032ff81;
	dma_ctrl1 |= CFG_VSYNC_INV_MASK;
	writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));

	/* Configure default register values */
	writel_relaxed(0x00000000, &regs->blank_color);
	writel_relaxed(0x00000000, &regs->g_1);
	writel_relaxed(0x00000000, &regs->g_start);

	/*
	 * 1.enable multiple burst request in DMA AXI
	 * bus arbiter for faster read if not tv path;
	 * 2.enable horizontal smooth filter;
	 */
	if (PATH_PN == path->id) {
		/* panel path: smoothing plus fast arbiter */
		mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
			| CFG_ARBFAST_ENA(1);
		tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
		tmp |= mask;
		writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
	} else if (PATH_TV == path->id) {
		/* TV path: clear the fast-arbiter bit, keep smoothing on */
		mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
			| CFG_ARBFAST_ENA(1);
		tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
		tmp &= ~mask;
		tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
		writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
	}
}
  339. static int path_init(struct mmphw_path_plat *path_plat,
  340. struct mmp_mach_path_config *config)
  341. {
  342. struct mmphw_ctrl *ctrl = path_plat->ctrl;
  343. struct mmp_path_info *path_info;
  344. struct mmp_path *path = NULL;
  345. dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);
  346. /* init driver data */
  347. path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL);
  348. if (!path_info) {
  349. dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n",
  350. __func__, config->name);
  351. return 0;
  352. }
  353. path_info->name = config->name;
  354. path_info->id = path_plat->id;
  355. path_info->dev = ctrl->dev;
  356. path_info->overlay_num = config->overlay_num;
  357. path_info->overlay_ops = &mmphw_overlay_ops;
  358. path_info->set_mode = path_set_mode;
  359. path_info->plat_data = path_plat;
  360. /* create/register platform device */
  361. path = mmp_register_path(path_info);
  362. if (!path) {
  363. kfree(path_info);
  364. return 0;
  365. }
  366. path_plat->path = path;
  367. path_plat->path_config = config->path_config;
  368. path_plat->link_config = config->link_config;
  369. path_set_default(path);
  370. kfree(path_info);
  371. return 1;
  372. }
  373. static void path_deinit(struct mmphw_path_plat *path_plat)
  374. {
  375. if (!path_plat)
  376. return;
  377. if (path_plat->path)
  378. mmp_unregister_path(path_plat->path);
  379. }
  380. static int mmphw_probe(struct platform_device *pdev)
  381. {
  382. struct mmp_mach_plat_info *mi;
  383. struct resource *res;
  384. int ret, i, size, irq;
  385. struct mmphw_path_plat *path_plat;
  386. struct mmphw_ctrl *ctrl = NULL;
  387. /* get resources from platform data */
  388. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  389. if (res == NULL) {
  390. dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
  391. ret = -ENOENT;
  392. goto failed;
  393. }
  394. irq = platform_get_irq(pdev, 0);
  395. if (irq < 0) {
  396. dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
  397. ret = -ENOENT;
  398. goto failed;
  399. }
  400. /* get configs from platform data */
  401. mi = pdev->dev.platform_data;
  402. if (mi == NULL || !mi->path_num || !mi->paths) {
  403. dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
  404. ret = -EINVAL;
  405. goto failed;
  406. }
  407. /* allocate */
  408. size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
  409. mi->path_num;
  410. ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
  411. if (!ctrl) {
  412. ret = -ENOMEM;
  413. goto failed;
  414. }
  415. ctrl->name = mi->name;
  416. ctrl->path_num = mi->path_num;
  417. ctrl->dev = &pdev->dev;
  418. ctrl->irq = irq;
  419. platform_set_drvdata(pdev, ctrl);
  420. mutex_init(&ctrl->access_ok);
  421. /* map registers.*/
  422. if (!devm_request_mem_region(ctrl->dev, res->start,
  423. resource_size(res), ctrl->name)) {
  424. dev_err(ctrl->dev,
  425. "can't request region for resource %pR\n", res);
  426. ret = -EINVAL;
  427. goto failed;
  428. }
  429. ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
  430. res->start, resource_size(res));
  431. if (ctrl->reg_base == NULL) {
  432. dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
  433. res->start, res->end);
  434. ret = -ENOMEM;
  435. goto failed;
  436. }
  437. /* request irq */
  438. ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
  439. IRQF_SHARED, "lcd_controller", ctrl);
  440. if (ret < 0) {
  441. dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
  442. __func__, ctrl->irq);
  443. ret = -ENXIO;
  444. goto failed;
  445. }
  446. /* get clock */
  447. ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
  448. if (IS_ERR(ctrl->clk)) {
  449. dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
  450. ret = -ENOENT;
  451. goto failed_get_clk;
  452. }
  453. clk_prepare_enable(ctrl->clk);
  454. /* init global regs */
  455. ctrl_set_default(ctrl);
  456. /* init pathes from machine info and register them */
  457. for (i = 0; i < ctrl->path_num; i++) {
  458. /* get from config and machine info */
  459. path_plat = &ctrl->path_plats[i];
  460. path_plat->id = i;
  461. path_plat->ctrl = ctrl;
  462. /* path init */
  463. if (!path_init(path_plat, &mi->paths[i])) {
  464. ret = -EINVAL;
  465. goto failed_path_init;
  466. }
  467. }
  468. #ifdef CONFIG_MMP_DISP_SPI
  469. ret = lcd_spi_register(ctrl);
  470. if (ret < 0)
  471. goto failed_path_init;
  472. #endif
  473. dev_info(ctrl->dev, "device init done\n");
  474. return 0;
  475. failed_path_init:
  476. for (i = 0; i < ctrl->path_num; i++) {
  477. path_plat = &ctrl->path_plats[i];
  478. path_deinit(path_plat);
  479. }
  480. if (ctrl->clk) {
  481. devm_clk_put(ctrl->dev, ctrl->clk);
  482. clk_disable_unprepare(ctrl->clk);
  483. }
  484. failed_get_clk:
  485. devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
  486. failed:
  487. if (ctrl) {
  488. if (ctrl->reg_base)
  489. devm_iounmap(ctrl->dev, ctrl->reg_base);
  490. devm_release_mem_region(ctrl->dev, res->start,
  491. resource_size(res));
  492. devm_kfree(ctrl->dev, ctrl);
  493. }
  494. platform_set_drvdata(pdev, NULL);
  495. dev_err(&pdev->dev, "device init failed\n");
  496. return ret;
  497. }
/* Platform driver glue; note no .remove callback is provided. */
static struct platform_driver mmphw_driver = {
	.driver = {
		.name = "mmp-disp",
		.owner = THIS_MODULE,
	},
	.probe = mmphw_probe,
};
/* Register the platform driver at module load. */
static int mmphw_init(void)
{
	return platform_driver_register(&mmphw_driver);
}
module_init(mmphw_init);

/* NOTE(review): no module_exit()/driver unregister is provided, so the
 * module cannot be cleanly unloaded -- confirm this is intentional. */
MODULE_AUTHOR("Li Guoqing<ligq@marvell.com>");
MODULE_DESCRIPTION("Framebuffer driver for mmp");
MODULE_LICENSE("GPL");