nv50_vram.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_mm.h"
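
/*
 * Storage-type table, indexed by the 7-bit NV50 storage type taken from
 * an object's tile_flags.  A zero entry marks the type as unusable for
 * VRAM; non-zero entries are handed to nouveau_mm_get() as the
 * allocation type.
 */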
static int types[0x80] = {
        1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
        1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
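
/* Check whether the storage type encoded in tile_flags is usable for VRAM. */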
bool
nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
        int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;

        if (likely(type < ARRAY_SIZE(types) && types[type]))
                return true;
        return false;
}
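
/*
 * Free a VRAM allocation: return every region to the VRAM mm and release
 * the compression tag block, if one was reserved.
 */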
void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_mm *mm = dev_priv->engine.vram.mm;
        struct nouveau_mm_node *this;
        struct nouveau_mem *mem;

        mem = *pmem;
        *pmem = NULL;
        if (unlikely(mem == NULL))
                return;

        mutex_lock(&mm->mutex);
        while (!list_empty(&mem->regions)) {
                this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);

                list_del(&this->rl_entry);
                nouveau_mm_put(mm, this);
        }

        if (mem->tag) {
                drm_mm_put_block(mem->tag);
                mem->tag = NULL;
        }
        mutex_unlock(&mm->mutex);

        kfree(mem);
}
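
/*
 * Allocate VRAM.  size, align and size_nc arrive in bytes and are
 * converted to 4KiB pages; memtype carries the storage type in its low
 * seven bits and the compression mode in bits 9:8.
 */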
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
              u32 memtype, struct nouveau_mem **pmem)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_mm *mm = dev_priv->engine.vram.mm;
        struct nouveau_mm_node *r;
        struct nouveau_mem *mem;
        int comp = (memtype & 0x300) >> 8;
        int type = (memtype & 0x07f);
        int ret;

        if (!types[type])
                return -EINVAL;
        size >>= 12;
        align >>= 12;
        size_nc >>= 12;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        mutex_lock(&mm->mutex);
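        /*
         * Compressed storage types need compression tags from the PFB tag
         * heap.  Tags are only reserved for 64KiB-aligned allocations
         * (align == 16 pages); if none can be had, fall back to the
         * uncompressed variant of the type.
         */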
        if (comp) {
                if (align == 16) {
                        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
                        int n = (size >> 4) * comp;

                        mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
                        if (mem->tag)
                                mem->tag = drm_mm_get_block(mem->tag, n, 0);
                }

                if (unlikely(!mem->tag))
                        comp = 0;
        }

        INIT_LIST_HEAD(&mem->regions);
        mem->dev = dev_priv->dev;
        mem->memtype = (comp << 7) | type;
        mem->size = size;
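        /*
         * Carve regions out of the VRAM mm until the request is covered;
         * the allocation may end up split across several regions.  size_nc
         * is passed straight through to nouveau_mm_get(), presumably to
         * allow smaller, non-contiguous pieces than the full request.
         */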
        do {
                ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
                if (ret) {
                        mutex_unlock(&mm->mutex);
                        nv50_vram_del(dev, &mem);
                        return ret;
                }

                list_add_tail(&r->rl_entry, &mem->regions);
                size -= r->length;
        } while (size);
        mutex_unlock(&mm->mutex);

        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
        mem->offset = (u64)r->offset << 12;
        *pmem = mem;
        return 0;
}
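
/*
 * Work out the row block size from the memory controller configuration
 * (partition count, column/row bits, bank count).  nv50_vram_init() uses
 * the result, in pages, as the block size of the VRAM mm.
 */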
static u32
nv50_vram_rblock(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int i, parts, colbits, rowbitsa, rowbitsb, banks;
        u64 rowsize, predicted;
        u32 r0, r4, rt, ru, rblock_size;

        r0 = nv_rd32(dev, 0x100200);
        r4 = nv_rd32(dev, 0x100204);
        rt = nv_rd32(dev, 0x100250);
        ru = nv_rd32(dev, 0x001540);
        NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
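
        /* count the memory partitions enabled in 0x001540 */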
        for (i = 0, parts = 0; i < 8; i++) {
                if (ru & (0x00010000 << i))
                        parts++;
        }

        colbits  =  (r4 & 0x0000f000) >> 12;
        rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
        rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
        banks    = ((r4 & 0x01000000) ? 8 : 4);

        rowsize = parts * banks * (1 << colbits) * 8;
        predicted = rowsize << rowbitsa;
        if (r0 & 0x00000004)
                predicted += rowsize << rowbitsb;

        if (predicted != dev_priv->vram_size) {
                NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
                        (u32)(dev_priv->vram_size >> 20));
                NV_WARN(dev, "we calculated %dMiB VRAM\n",
                        (u32)(predicted >> 20));
        }

        rblock_size = rowsize;
        if (rt & 1)
                rblock_size *= 3;

        NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
        return rblock_size;
}
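
/*
 * Determine the amount of VRAM and set up the VRAM mm over it, keeping
 * the first 256KiB (VGA memory) and the last 1MiB (VBIOS etc.) reserved.
 */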
int
nv50_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
        const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
        u32 rblock, length;
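
        /*
         * The low byte of 0x10020c supplies bits 39:32 of the VRAM size;
         * it is shifted into place and then masked out of the low word.
         */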
        dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
        dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
        dev_priv->vram_size &= 0xffffffff00ULL;

        /* IGPs, no funky reordering happens here, they don't have VRAM */
        if (dev_priv->chipset == 0xaa ||
            dev_priv->chipset == 0xac ||
            dev_priv->chipset == 0xaf) {
                dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
                rblock = 4096 >> 12;
        } else {
                rblock = nv50_vram_rblock(dev) >> 12;
        }

        length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;

        return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
}
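
/* Tear down the VRAM mm created by nv50_vram_init(). */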
void
nv50_vram_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;

        nouveau_mm_fini(&vram->mm);
}