nv10_fb.c

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
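
/*
 * IGP chipsets have no PFB VRAM size register; the amount of memory is
 * read from the host bridge's PCI config space instead.
 */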
int
nv1a_fb_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct pci_dev *bridge;
        uint32_t mem, mib;

        bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
        if (!bridge) {
                NV_ERROR(dev, "no bridge device\n");
                return 0;
        }

        if (dev_priv->chipset == 0x1a) {
                pci_read_config_dword(bridge, 0x7c, &mem);
                mib = ((mem >> 6) & 31) + 1;
        } else {
                pci_read_config_dword(bridge, 0x84, &mem);
                mib = ((mem >> 4) & 127) + 1;
        }

        dev_priv->vram_size = mib * 1024 * 1024;
        return 0;
}
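
/*
 * Read the VRAM size from PFB_FIFO_DATA and, on pre-NV20 boards, work
 * out whether SDRAM or DDR memory is fitted.
 */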
int
nv10_fb_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);

        dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
        if (dev_priv->card_type < NV_20) {
                u32 cfg0 = nv_rd32(dev, 0x100200);
                if (cfg0 & 0x00000001)
                        dev_priv->vram_type = NV_MEM_TYPE_DDR1;
                else
                        dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
        }

        return 0;
}
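
/* Allocate a block of on-die tag memory from the NV20 tag heap. */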
static struct drm_mm_node *
nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct drm_mm_node *mem;
        int ret;

        ret = drm_mm_pre_get(&pfb->tag_heap);
        if (ret)
                return NULL;

        spin_lock(&dev_priv->tile.lock);
        mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
        if (mem)
                mem = drm_mm_get_block_atomic(mem, size, 0);
        spin_unlock(&dev_priv->tile.lock);

        return mem;
}
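
/* Return a previously allocated tag block to the tag heap. */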
static void
nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        spin_lock(&dev_priv->tile.lock);
        drm_mm_put_block(mem);
        spin_unlock(&dev_priv->tile.lock);
}
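
/*
 * Fill in the software state for tile region i; the hardware registers
 * are only updated when nv10_fb_set_tile_region() is called.
 */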
void
nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
                         uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
        int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);

        tile->addr = addr;
        tile->limit = max(1u, addr + size) - 1;
        tile->pitch = pitch;

        if (dev_priv->card_type == NV_20) {
                if (flags & NOUVEAU_GEM_TILE_ZETA) {
                        /*
                         * Allocate some of the on-die tag memory,
                         * used to store Z compression meta-data (most
                         * likely just a bitmap determining if a given
                         * tile is compressed or not).
                         */
                        tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);

                        if (tile->tag_mem) {
                                /* Enable Z compression */
                                if (dev_priv->chipset >= 0x25)
                                        tile->zcomp = tile->tag_mem->start |
                                                      (bpp == 16 ?
                                                       NV25_PFB_ZCOMP_MODE_16 :
                                                       NV25_PFB_ZCOMP_MODE_32);
                                else
                                        tile->zcomp = tile->tag_mem->start |
                                                      NV20_PFB_ZCOMP_EN |
                                                      (bpp == 16 ? 0 :
                                                       NV20_PFB_ZCOMP_MODE_32);
                        }

                        tile->addr |= 3;
                } else {
                        tile->addr |= 1;
                }
        } else {
                tile->addr |= 1 << 31;
        }
}
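
/* Release the tag memory (if any) and clear the tile's software state. */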
void
nv10_fb_free_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        if (tile->tag_mem) {
                nv20_fb_free_tag(dev, tile->tag_mem);
                tile->tag_mem = NULL;
        }

        tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
}
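
/* Write the software state for tile region i to the PFB registers. */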
void
nv10_fb_set_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
        nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
        nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);

        if (dev_priv->card_type == NV_20)
                nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}
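
/*
 * Set up the tiling state: initialise the NV20 tag heap and turn every
 * tile region off.
 */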
int
nv10_fb_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        int i;

        pfb->num_tiles = NV10_PFB_TILE__SIZE;

        if (dev_priv->card_type == NV_20)
                drm_mm_init(&pfb->tag_heap, 0,
                            (dev_priv->chipset >= 0x25 ?
                             64 * 1024 : 32 * 1024));

        /* Turn all the tiling regions off. */
        for (i = 0; i < pfb->num_tiles; i++)
                pfb->set_tile_region(dev, i);

        return 0;
}
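
/* Free all tile regions and tear down the NV20 tag heap. */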
void
nv10_fb_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        int i;

        for (i = 0; i < pfb->num_tiles; i++)
                pfb->free_tile_region(dev, i);

        if (dev_priv->card_type == NV_20)
                drm_mm_takedown(&pfb->tag_heap);
}