nouveau_state.c

/*
 * Copyright 2005 Stephane Marchesin
 * Copyright 2008 Stuart Bennett
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/swab.h>
#include <linux/slab.h>

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nv50_display.h"

static void nouveau_stub_takedown(struct drm_device *dev) {}
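
/*
 * Fill in dev_priv->engine with the instmem/mc/timer/fb/graph/fifo hooks
 * appropriate for this chipset family (NV04/NV10/NV20/NV30/NV40/NV50).
 * Returns non-zero for chipsets the driver does not support.
 */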
static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	switch (dev_priv->chipset & 0xf0) {
	case 0x00:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv04_fb_init;
		engine->fb.takedown = nv04_fb_takedown;
		engine->graph.grclass = nv04_graph_grclass;
		engine->graph.init = nv04_graph_init;
		engine->graph.takedown = nv04_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv04_graph_channel;
		engine->graph.create_context = nv04_graph_create_context;
		engine->graph.destroy_context = nv04_graph_destroy_context;
		engine->graph.load_context = nv04_graph_load_context;
		engine->graph.unload_context = nv04_graph_unload_context;
		engine->fifo.channels = 16;
		engine->fifo.init = nv04_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv04_fifo_channel_id;
		engine->fifo.create_context = nv04_fifo_create_context;
		engine->fifo.destroy_context = nv04_fifo_destroy_context;
		engine->fifo.load_context = nv04_fifo_load_context;
		engine->fifo.unload_context = nv04_fifo_unload_context;
		break;
	case 0x10:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv10_graph_grclass;
		engine->graph.init = nv10_graph_init;
		engine->graph.takedown = nv10_graph_takedown;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv10_graph_create_context;
		engine->graph.destroy_context = nv10_graph_destroy_context;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv10_graph_load_context;
		engine->graph.unload_context = nv10_graph_unload_context;
		engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x20:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv20_graph_grclass;
		engine->graph.init = nv20_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv20_graph_create_context;
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x30:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv30_graph_grclass;
		engine->graph.init = nv30_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv20_graph_create_context;
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x40:
	case 0x60:
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv40_mc_init;
		engine->mc.takedown = nv40_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv40_fb_init;
		engine->fb.takedown = nv40_fb_takedown;
		engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
		engine->graph.grclass = nv40_graph_grclass;
		engine->graph.init = nv40_graph_init;
		engine->graph.takedown = nv40_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv40_graph_channel;
		engine->graph.create_context = nv40_graph_create_context;
		engine->graph.destroy_context = nv40_graph_destroy_context;
		engine->graph.load_context = nv40_graph_load_context;
		engine->graph.unload_context = nv40_graph_unload_context;
		engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv40_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv40_fifo_create_context;
		engine->fifo.destroy_context = nv40_fifo_destroy_context;
		engine->fifo.load_context = nv40_fifo_load_context;
		engine->fifo.unload_context = nv40_fifo_unload_context;
		break;
	case 0x50:
	case 0x80: /* gotta love NVIDIA's consistency.. */
	case 0x90:
	case 0xA0:
		engine->instmem.init = nv50_instmem_init;
		engine->instmem.takedown = nv50_instmem_takedown;
		engine->instmem.suspend = nv50_instmem_suspend;
		engine->instmem.resume = nv50_instmem_resume;
		engine->instmem.populate = nv50_instmem_populate;
		engine->instmem.clear = nv50_instmem_clear;
		engine->instmem.bind = nv50_instmem_bind;
		engine->instmem.unbind = nv50_instmem_unbind;
		engine->instmem.prepare_access = nv50_instmem_prepare_access;
		engine->instmem.finish_access = nv50_instmem_finish_access;
		engine->mc.init = nv50_mc_init;
		engine->mc.takedown = nv50_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv50_fb_init;
		engine->fb.takedown = nv50_fb_takedown;
		engine->graph.grclass = nv50_graph_grclass;
		engine->graph.init = nv50_graph_init;
		engine->graph.takedown = nv50_graph_takedown;
		engine->graph.fifo_access = nv50_graph_fifo_access;
		engine->graph.channel = nv50_graph_channel;
		engine->graph.create_context = nv50_graph_create_context;
		engine->graph.destroy_context = nv50_graph_destroy_context;
		engine->graph.load_context = nv50_graph_load_context;
		engine->graph.unload_context = nv50_graph_unload_context;
		engine->fifo.channels = 128;
		engine->fifo.init = nv50_fifo_init;
		engine->fifo.takedown = nv50_fifo_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv50_fifo_channel_id;
		engine->fifo.create_context = nv50_fifo_create_context;
		engine->fifo.destroy_context = nv50_fifo_destroy_context;
		engine->fifo.load_context = nv50_fifo_load_context;
		engine->fifo.unload_context = nv50_fifo_unload_context;
		break;
	default:
		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
		return 1;
	}

	return 0;
}
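
/*
 * VGA arbiter callback: tell the card whether to decode legacy VGA cycles,
 * and report back which I/O and memory ranges it will claim as a result.
 */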
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
	struct drm_device *dev = priv;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset >= 0x40)
		nv_wr32(dev, 0x88054, state);
	else
		nv_wr32(dev, 0x1854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
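
/*
 * Allocate the kernel's own channel and attach DMA objects covering all of
 * VRAM (NvDmaVRAM) and the GART aperture (NvDmaGART) to it.  On any failure
 * the partially-constructed channel is torn down again.
 */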
static int
nouveau_card_init_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
				    (struct drm_file *)-2,
				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;

	gpuobj = NULL;
	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
				     0, dev_priv->vram_size,
				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				     &gpuobj);
	if (ret)
		goto out_err;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
				     gpuobj, NULL);
	if (ret)
		goto out_err;

	gpuobj = NULL;
	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
	if (ret)
		goto out_err;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
				     gpuobj, NULL);
	if (ret)
		goto out_err;

	return 0;

out_err:
	nouveau_gpuobj_del(dev, &gpuobj);
	nouveau_channel_free(dev_priv->channel);
	dev_priv->channel = NULL;
	return ret;
}
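
/*
 * vga_switcheroo callbacks: resume/suspend the device when the active GPU
 * changes, and only allow a switch while no DRM clients hold the device open.
 */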
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
					 enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
		nouveau_pci_resume(pdev);
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
		drm_kms_helper_poll_disable(dev);
		nouveau_pci_suspend(pdev, pmm);
	}
}

static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);

	return can_switch;
}
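
/*
 * Bring the card up: BIOS tables, memory and instance memory, the hardware
 * engines (PMC, PTIMER, PFB, and unless acceleration is disabled, PGRAPH and
 * PFIFO), IRQs, the kernel channel and, under KMS, the display and fbcon.
 * The error labels unwind these steps in reverse order.
 */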
int
nouveau_card_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine;
	int ret;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
		return 0;

	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
	vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
				       nouveau_switcheroo_can_switch);

	/* Initialise internal driver API hooks */
	ret = nouveau_init_engine_ptrs(dev);
	if (ret)
		goto out;
	engine = &dev_priv->engine;
	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
	spin_lock_init(&dev_priv->context_switch_lock);

	/* Parse BIOS tables / Run init tables if card not POSTed */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bios_init(dev);
		if (ret)
			goto out;
	}

	ret = nouveau_mem_detect(dev);
	if (ret)
		goto out_bios;

	ret = nouveau_gpuobj_early_init(dev);
	if (ret)
		goto out_bios;

	/* Initialise instance memory, must happen before mem_init so we
	 * know exactly how much VRAM we're able to use for "normal"
	 * purposes.
	 */
	ret = engine->instmem.init(dev);
	if (ret)
		goto out_gpuobj_early;

	/* Setup the memory manager */
	ret = nouveau_mem_init(dev);
	if (ret)
		goto out_instmem;

	ret = nouveau_gpuobj_init(dev);
	if (ret)
		goto out_mem;

	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
		goto out_gpuobj;

	/* PTIMER */
	ret = engine->timer.init(dev);
	if (ret)
		goto out_mc;

	/* PFB */
	ret = engine->fb.init(dev);
	if (ret)
		goto out_timer;

	if (nouveau_noaccel)
		engine->graph.accel_blocked = true;
	else {
		/* PGRAPH */
		ret = engine->graph.init(dev);
		if (ret)
			goto out_fb;

		/* PFIFO */
		ret = engine->fifo.init(dev);
		if (ret)
			goto out_graph;
	}

	/* this calls irq_preinstall, registers the irq handler and
	 * calls irq_postinstall
	 */
	ret = drm_irq_install(dev);
	if (ret)
		goto out_fifo;

	ret = drm_vblank_init(dev, 0);
	if (ret)
		goto out_irq;

	/* what about PVIDEO/PCRTC/PRAMDAC etc? */

	if (!engine->graph.accel_blocked) {
		ret = nouveau_card_init_channel(dev);
		if (ret)
			goto out_irq;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50)
			ret = nv50_display_create(dev);
		else
			ret = nv04_display_create(dev);
		if (ret)
			goto out_channel;
	}

	ret = nouveau_backlight_init(dev);
	if (ret)
		NV_ERROR(dev, "Error %d registering backlight\n", ret);

	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		nouveau_fbcon_init(dev);
		drm_kms_helper_poll_init(dev);
	}

	return 0;

out_channel:
	if (dev_priv->channel) {
		nouveau_channel_free(dev_priv->channel);
		dev_priv->channel = NULL;
	}
out_irq:
	drm_irq_uninstall(dev);
out_fifo:
	if (!nouveau_noaccel)
		engine->fifo.takedown(dev);
out_graph:
	if (!nouveau_noaccel)
		engine->graph.takedown(dev);
out_fb:
	engine->fb.takedown(dev);
out_timer:
	engine->timer.takedown(dev);
out_mc:
	engine->mc.takedown(dev);
out_gpuobj:
	nouveau_gpuobj_takedown(dev);
out_mem:
	nouveau_sgdma_takedown(dev);
	nouveau_mem_close(dev);
out_instmem:
	engine->instmem.takedown(dev);
out_gpuobj_early:
	nouveau_gpuobj_late_takedown(dev);
out_bios:
	nouveau_bios_takedown(dev);
out:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
	return ret;
}
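
/*
 * Undo everything nouveau_card_init() set up, in reverse order, if the card
 * was brought up at all.
 */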
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
		nouveau_backlight_exit(dev);

		if (dev_priv->channel) {
			nouveau_channel_free(dev_priv->channel);
			dev_priv->channel = NULL;
		}

		if (!nouveau_noaccel) {
			engine->fifo.takedown(dev);
			engine->graph.takedown(dev);
		}
		engine->fb.takedown(dev);
		engine->timer.takedown(dev);
		engine->mc.takedown(dev);

		mutex_lock(&dev->struct_mutex);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
		mutex_unlock(&dev->struct_mutex);
		nouveau_sgdma_takedown(dev);

		nouveau_gpuobj_takedown(dev);
		nouveau_mem_close(dev);

		engine->instmem.takedown(dev);

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			drm_irq_uninstall(dev);

		nouveau_gpuobj_late_takedown(dev);
		nouveau_bios_takedown(dev);

		vga_client_register(dev->pdev, NULL, NULL, NULL);

		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
	}
}
/* here a client dies, release the stuff that was allocated for its
 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	nouveau_channel_cleanup(dev, file_priv);
}

/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
{
	return 0;
}
/* if we have an OF card, copy vbios to RAMIN */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	int size, i;
	const uint32_t *bios;
	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
	if (!dn) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	bios = of_get_property(dn, "NVDA,BMP", &size);
	if (bios) {
		for (i = 0; i < size; i += 4)
			nv_wi32(dev, i, bios[i/4]);
		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
	} else {
		NV_INFO(dev, "Unable to get the OF bios\n");
	}
#endif
}
static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	struct apertures_struct *aper = alloc_apertures(3);
	if (!aper)
		return NULL;

	aper->ranges[0].base = pci_resource_start(pdev, 1);
	aper->ranges[0].size = pci_resource_len(pdev, 1);
	aper->count = 1;

	if (pci_resource_len(pdev, 2)) {
		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
		aper->count++;
	}

	if (pci_resource_len(pdev, 3)) {
		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
		aper->count++;
	}

	return aper;
}

static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	bool primary = false;
	dev_priv->apertures = nouveau_get_apertures(dev);
	if (!dev_priv->apertures)
		return -ENOMEM;

#ifdef CONFIG_X86
	primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif

	remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
	return 0;
}
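
/*
 * First-stage driver load: map the MMIO and PRAMIN apertures, detect the
 * chipset and card generation from PMC_BOOT_0, kick conflicting framebuffer
 * drivers off the hardware and, under KMS, initialise the card immediately.
 */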
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_nouveau_private *dev_priv;
	uint32_t reg0;
	resource_size_t mmio_start_offs;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;
	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	dev_priv->flags = flags & NOUVEAU_FLAGS;
	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;

	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
		 dev->pci_vendor, dev->pci_device, dev->pdev->class);

	dev_priv->wq = create_workqueue("nouveau");
	if (!dev_priv->wq)
		return -EINVAL;

	/* resource 0 is mmio regs */
	/* resource 1 is linear FB */
	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
	/* resource 6 is bios */

	/* map the mmio regs */
	mmio_start_offs = pci_resource_start(dev->pdev, 0);
	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
	if (!dev_priv->mmio) {
		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
			 "Please report your setup to " DRIVER_EMAIL "\n");
		return -EINVAL;
	}
	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
		 (unsigned long long)mmio_start_offs);

#ifdef __BIG_ENDIAN
	/* Put the card in BE mode if it's not */
	if (nv_rd32(dev, NV03_PMC_BOOT_1))
		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);

	DRM_MEMORYBARRIER();
#endif

	/* Time to determine the card architecture */
	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);

	/* We're dealing with >=NV10 */
	if ((reg0 & 0x0f000000) > 0) {
		/* Bit 27-20 contain the architecture in hex */
		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
	/* NV04 or NV05 */
	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
		if (reg0 & 0x00f00000)
			dev_priv->chipset = 0x05;
		else
			dev_priv->chipset = 0x04;
	} else
		dev_priv->chipset = 0xff;

	switch (dev_priv->chipset & 0xf0) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
		dev_priv->card_type = dev_priv->chipset & 0xf0;
		break;
	case 0x40:
	case 0x60:
		dev_priv->card_type = NV_40;
		break;
	case 0x50:
	case 0x80:
	case 0x90:
	case 0xa0:
		dev_priv->card_type = NV_50;
		break;
	default:
		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
		return -EINVAL;
	}

	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
		dev_priv->card_type, reg0);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int ret = nouveau_remove_conflicting_drivers(dev);
		if (ret)
			return ret;
	}

	/* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
	if (dev_priv->card_type >= NV_40) {
		int ramin_bar = 2;
		if (pci_resource_len(dev->pdev, ramin_bar) == 0)
			ramin_bar = 3;

		dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
		dev_priv->ramin =
			ioremap(pci_resource_start(dev->pdev, ramin_bar),
				dev_priv->ramin_size);
		if (!dev_priv->ramin) {
			NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
			return -ENOMEM;
		}
	} else {
		dev_priv->ramin_size = 1 * 1024 * 1024;
		dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
					  dev_priv->ramin_size);
		if (!dev_priv->ramin) {
			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
			return -ENOMEM;
		}
	}

	nouveau_OF_copy_vbios_to_ramin(dev);

	/* Special flags */
	if (dev->pci_device == 0x01a0)
		dev_priv->flags |= NV_NFORCE;
	else if (dev->pci_device == 0x01f0)
		dev_priv->flags |= NV_NFORCE2;

	/* For kernel modesetting, init card now and bring up fbcon */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int ret = nouveau_card_init(dev);
		if (ret)
			return ret;
	}

	return 0;
}
static void nouveau_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* In the case of an error dev_priv may not be allocated yet */
	if (dev_priv)
		nouveau_card_takedown(dev);
}

/* KMS: we need mmio at load time, not when the first drm client opens. */
void nouveau_lastclose(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	nouveau_close(dev);
}

int nouveau_unload(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_kms_helper_poll_fini(dev);
		nouveau_fbcon_fini(dev);
		if (dev_priv->card_type >= NV_50)
			nv50_display_destroy(dev);
		else
			nv04_display_destroy(dev);
		nouveau_close(dev);
	}

	iounmap(dev_priv->mmio);
	iounmap(dev_priv->ramin);

	kfree(dev_priv);
	dev->dev_private = NULL;
	return 0;
}
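
/*
 * GETPARAM ioctl: report chipset, bus type, aperture bases/sizes and other
 * static card properties to userspace.
 */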
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_getparam *getparam = data;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = dev_priv->chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		getparam->value = dev->pci_vendor;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		getparam->value = dev->pci_device;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		if (drm_device_is_agp(dev))
			getparam->value = NV_AGP;
		else if (drm_device_is_pcie(dev))
			getparam->value = NV_PCIE;
		else
			getparam->value = NV_PCI;
		break;
	case NOUVEAU_GETPARAM_FB_PHYSICAL:
		getparam->value = dev_priv->fb_phys;
		break;
	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
		getparam->value = dev_priv->gart_info.aper_base;
		break;
	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
		if (dev->sg) {
			getparam->value = (unsigned long)dev->sg->virtual;
		} else {
			NV_ERROR(dev, "Requested PCIGART address, "
				 "while no PCIGART was created\n");
			return -EINVAL;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = dev_priv->fb_available_size;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = dev_priv->gart_info.aper_size;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = dev_priv->vm_vram_base;
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = dev_priv->engine.timer.read(dev);
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		/* NV40 and NV50 versions are quite different, but register
		 * address is the same. User is supposed to know the card
		 * family anyway... */
		if (dev_priv->chipset >= 0x40) {
			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
			break;
		}
		/* FALLTHRU */
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

int
nouveau_ioctl_setparam(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_setparam *setparam = data;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (setparam->param) {
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
		return -EINVAL;
	}

	return 0;
}
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
			uint32_t reg, uint32_t mask, uint32_t val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	uint64_t start = ptimer->read(dev);

	do {
		if ((nv_rd32(dev, reg) & mask) == val)
			return true;
	} while (ptimer->read(dev) - start < timeout);

	return false;
}

/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
	if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
			 nv_rd32(dev, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}