@@ -51,6 +51,11 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+				       unsigned alignment);
+static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev);
 
 static void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -529,6 +534,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vma: VMA in question
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace. The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room. So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	int ret = 0;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
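+	/* (drm_gem_mmap() maps the whole object starting at its fake
+	 * offset, so the delta from vm_start is the page index into
+	 * the object) */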
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	/* Now bind it into the GTT if needed */
+	mutex_lock(&dev->struct_mutex);
+	if (!obj_priv->gtt_space) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return VM_FAULT_SIGBUS;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	/* Need a new fence register? */
+	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_get_fence_reg(obj);
+
+	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+		page_offset;
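+	/* (the PTE points into the GTT aperture at dev->agp->base, not at
+	 *  the backing pages directly, so CPU access goes through the
+	 *  aperture and honours the fence programmed above) */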
+
+	/* Finally, remap it using the new GTT offset */
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	switch (ret) {
+	case -ENOMEM:
+	case -EAGAIN:
+		return VM_FAULT_OOM;
+	case -EFAULT:
+	case -EBUSY:
+		DRM_ERROR("can't insert pfn?? fault or busy...\n");
+		return VM_FAULT_SIGBUS;
+	default:
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = drm_calloc(1, sizeof(*list->map), DRM_MEM_DRIVER);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+						    obj->size / PAGE_SIZE, 0, 0);
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
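+	/* (drm_mm_search_free() only finds a candidate hole; the
+	 *  drm_mm_get_block() call below actually claims it) */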
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
+		goto out_free_mm;
+	}
+
+	/* By now we should be all set, any drm_mmap request on the offset
+	 * below will get to our mmap & fault handler */
+	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	drm_free(list->map, sizeof(*list->map), DRM_MEM_DRIVER);
+
+	return ret;
+}
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int start, i;
+
+	/*
+	 * Minimum alignment is 4k (GTT page size), but might be greater
+	 * if a fence register is needed for the object.
+	 */
+	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	if (IS_I9XX(dev))
+		start = 1024*1024;
+	else
+		start = 512*1024;
+
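+	/* Round up to the next power of two >= start: pre-965 fence
+	 * regions must be power-of-two sized and aligned */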
+	for (i = start; i < obj->size; i <<= 1)
+		;
+
+	return i;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: DRM file info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
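+ *
+ * A rough sketch of the expected userspace sequence (the ioctl number and
+ * request struct are wired up in i915_drm.h, outside this file):
+ *
+ *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
+ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   fd, arg.offset);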
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj_priv = obj->driver_private;
+
+	if (!obj_priv->mmap_offset) {
+		ret = i915_gem_create_mmap_offset(obj);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	args->offset = obj_priv->mmap_offset;
+
+	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
+
+	/* Make sure the alignment is correct for fence regs etc */
+	if (obj_priv->agp_mem &&
+	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Pull it into the GTT so that we have a page list (makes the
+	 * initial fault faster and any subsequent flushing possible).
+	 */
+	if (!obj_priv->agp_mem) {
+		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -726,6 +977,7 @@ i915_gem_retire_request(struct drm_device *dev,
 	 */
 	if (obj_priv->last_rendering_seqno != request->seqno)
 		return;
+
 #if WATCH_LRU
 	DRM_INFO("%s: retire %d moves to inactive list %p\n",
 		 __func__, request->seqno, obj);
@@ -956,6 +1208,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	loff_t offset;
 	int ret = 0;
 
 #if WATCH_BUF
@@ -991,6 +1244,13 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* blow away mappings if mapped through GTT */
+	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
+	unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+
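+	/* ...and give back any fence register we were holding, since the
+	 * object no longer has a GTT range for it to cover */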
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
 	i915_gem_object_free_page_list(obj);
 
 	if (obj_priv->gtt_space) {
@@ -1149,6 +1409,203 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 	return 0;
 }
 
+static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint64_t val;
+
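+	/* A 965 fence is a 64-bit value: the end address of the fenced
+	 * region in the upper 32 bits, the start address in the lower,
+	 * plus the pitch (in 128-byte units, stored minus one), the
+	 * tiling mode and a valid bit. */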
+	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+			 0xfffff000) << 32;
+	val |= obj_priv->gtt_offset & 0xfffff000;
+	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+	val |= I965_FENCE_REG_VALID;
+
+	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+}
+
+static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
+		return;
+	}
+
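+	/* 945G/GM and G33 program Y-tile pitch in 128-byte units;
+	 * the other 9xx parts use 512-byte units */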
+	if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
+						       IS_I945GM(dev) ||
+						       IS_G33(dev)))
+		pitch_val = (obj_priv->stride / 128) - 1;
+	else
+		pitch_val = (obj_priv->stride / 512) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I915_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint32_t val;
+	uint32_t pitch_val;
+
+	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	    (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
+		return;
+	}
+
+	pitch_val = (obj_priv->stride / 128) - 1;
+
+	val = obj_priv->gtt_offset;
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+	val |= I830_FENCE_SIZE_BITS(obj->size);
+	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+	val |= I830_FENCE_REG_VALID;
+
+	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ *
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ */
+static void
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_fence_reg *reg = NULL;
+	int i, ret;
+
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_NONE:
+		WARN(1, "allocating a fence for non-tiled object?\n");
+		break;
+	case I915_TILING_X:
+		WARN(obj_priv->stride & (512 - 1),
+		     "object is X tiled but has non-512B pitch\n");
+		break;
+	case I915_TILING_Y:
+		WARN(obj_priv->stride & (128 - 1),
+		     "object is Y tiled but has non-128B pitch\n");
+		break;
+	}
+
+	/* First try to find a free reg */
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			break;
+	}
+
+	/* None available, try to steal one or wait for a user to finish */
+	if (i == dev_priv->num_fence_regs) {
+		struct drm_i915_gem_object *old_obj_priv = NULL;
+		loff_t offset;
+
+try_again:
+		/* Could try to use LRU here instead... */
+		for (i = dev_priv->fence_reg_start;
+		     i < dev_priv->num_fence_regs; i++) {
+			reg = &dev_priv->fence_regs[i];
+			old_obj_priv = reg->obj->driver_private;
+			if (!old_obj_priv->pin_count)
+				break;
+		}
+
+		/*
+		 * Now things get ugly... we have to wait for one of the
+		 * objects to finish before trying again.
+		 */
+		if (i == dev_priv->num_fence_regs) {
+			ret = i915_gem_object_wait_rendering(reg->obj);
+			if (ret) {
+				WARN(ret, "wait_rendering failed: %d\n", ret);
+				return;
+			}
+			goto try_again;
+		}
+
+		/*
+		 * Zap this virtual mapping so we can set up a fence again
+		 * for this object next time we need it.
+		 */
+		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
+		unmap_mapping_range(dev->dev_mapping, offset,
+				    reg->obj->size, 1);
+		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
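+		/* (the old object will refault through i915_gem_fault()
+		 *  and reacquire a fence if it's touched through the GTT
+		 *  again) */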
+	}
+
+	obj_priv->fence_reg = i;
+	reg->obj = obj;
+
+	if (IS_I965G(dev))
+		i965_write_fence_reg(reg);
+	else if (IS_I9XX(dev))
+		i915_write_fence_reg(reg);
+	else
+		i830_write_fence_reg(reg);
+}
+
+/**
+ * i915_gem_clear_fence_reg - clear out fence register info
+ * @obj: object to clear
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+static void
+i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (IS_I965G(dev))
+		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+	else
+		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+
+	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -2351,12 +2808,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
 	obj->driver_private = obj_priv;
 	obj_priv->obj = obj;
+	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+
 	return 0;
 }
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2364,6 +2827,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	map = list->map;
+	if (map) {
+		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
@@ -2432,8 +2909,7 @@ i915_gem_idle(struct drm_device *dev)
 	 */
 	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
 		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
-					I915_GEM_DOMAIN_GTT));
+	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
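+	/* (the GTT domain is no longer masked out of the request's flush
+	 *  domains, since objects can now be dirtied via GTT mappings) */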
 
 	if (seqno == 0) {
 		mutex_unlock(&dev->struct_mutex);
@@ -2758,5 +3234,13 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	/* Old X drivers will take 0-2 for front, back, depth buffers */
+	dev_priv->fence_reg_start = 3;
+
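+	/* 965 provides 16 fence registers; older parts have only 8 */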
+	if (IS_I965G(dev))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
 	i915_gem_detect_bit_6_swizzle(dev);
 }