-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 360
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c     |  23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |  17
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c       |  10
6 files changed, 274 insertions(+), 166 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7b37c198cb19..af9ff40b135b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
+ uint32_t setup_seqno;
};
struct sdvo_device_mapping {
@@ -752,6 +753,7 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode : 2;
+ unsigned int tiling_changed : 1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -1121,10 +1123,10 @@ i915_gem_next_request_seqno(struct drm_device *dev,
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
-int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
- bool interruptible);
-int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
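
The header hunk above replaces the old get/put_fence_reg pair with i915_gem_object_get_fence(), which takes an optional ring for pipelined fencing, and i915_gem_object_put_fence(). As a rough illustration only (a reader's sketch, not part of this patch; the helper name is made up), a caller preparing an object for CPU access through the GTT would now do something like:

/* Hypothetical caller sketch mirroring the fault/pwrite paths below:
 * tiled objects request a fence (NULL ring = synchronous, for CPU access),
 * untiled objects drop any stale fence covering their range.
 */
static int example_prepare_for_gtt_access(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		return ret;

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL, true);

	if (ret)
		i915_gem_object_unpin(obj);
	return ret;
}
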
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c3e6d7bda6e1..23d2417a3585 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -47,7 +47,8 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_obje
static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -684,7 +685,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
goto out_unpin_pages;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin_pages;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin_pages;
@@ -966,14 +971,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj->tiling_mode == I915_TILING_NONE &&
- obj->gtt_space &&
+ else if (obj->gtt_space &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin;
@@ -1205,12 +1213,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
- /* Need a new fence register? */
- if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, true);
- if (ret)
- goto unlock;
- }
+ if (obj->tiling_mode == I915_TILING_NONE)
+ ret = i915_gem_object_put_fence(obj);
+ else
+ ret = i915_gem_object_get_fence(obj, NULL, true);
+ if (ret)
+ goto unlock;
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
@@ -1608,7 +1616,6 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
list_del_init(&obj->ring_list);
obj->last_rendering_seqno = 0;
- obj->last_fenced_seqno = 0;
}
static void
@@ -1640,7 +1647,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
- obj->last_fenced_ring = NULL;
obj->active = 0;
obj->pending_gpu_write = false;
@@ -1803,7 +1809,11 @@ static void i915_gem_reset_fences(struct drm_device *dev)
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
- i915_gem_clear_fence_reg(obj);
+ reg->obj->fence_reg = I915_FENCE_REG_NONE;
+ reg->obj->fenced_gpu_access = false;
+ reg->obj->last_fenced_seqno = 0;
+ reg->obj->last_fenced_ring = NULL;
+ i915_gem_clear_fence_reg(dev, reg);
}
}
@@ -2114,8 +2124,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
/* release the fence reg _after_ flushing */
- if (obj->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == -ERESTARTSYS)
+ return ret;
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2357,59 +2368,118 @@ static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
return 0;
}
-static int i915_find_fence_reg(struct drm_device *dev,
- bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
+{
+ int ret;
+
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev,
+ obj->last_fenced_ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+
+ if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ obj->last_fenced_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ obj->last_fenced_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ obj->last_fenced_seqno = 0;
+ obj->last_fenced_ring = NULL;
+ }
+
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ ret = i915_gem_object_flush_fence(obj, NULL, true);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ i915_gem_clear_fence_reg(obj->base.dev,
+ &dev_priv->fence_regs[obj->fence_reg]);
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+ struct intel_ring_buffer *pipelined)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg;
- struct drm_i915_gem_object *obj = NULL;
- int i, avail, ret;
+ struct drm_i915_fence_reg *reg, *first, *avail;
+ int i;
/* First try to find a free reg */
- avail = 0;
+ avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
- return i;
+ return reg;
if (!reg->obj->pin_count)
- avail++;
+ avail = reg;
}
- if (avail == 0)
- return -ENOSPC;
+ if (avail == NULL)
+ return NULL;
/* None available, try to steal one or wait for a user to finish */
- avail = I915_FENCE_REG_NONE;
- list_for_each_entry(reg, &dev_priv->mm.fence_list,
- lru_list) {
- obj = reg->obj;
- if (obj->pin_count)
+ avail = first = NULL;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->obj->pin_count)
continue;
- /* found one! */
- avail = obj->fence_reg;
- break;
- }
+ if (first == NULL)
+ first = reg;
- BUG_ON(avail == I915_FENCE_REG_NONE);
+ if (!pipelined ||
+ !reg->obj->last_fenced_ring ||
+ reg->obj->last_fenced_ring == pipelined) {
+ avail = reg;
+ break;
+ }
+ }
- /* We only have a reference on obj from the active list. put_fence_reg
- * might drop that one, causing a use-after-free in it. So hold a
- * private reference to obj like the other callers of put_fence_reg
- * (set_tiling ioctl) do. */
- drm_gem_object_reference(&obj->base);
- ret = i915_gem_object_put_fence_reg(obj, interruptible);
- drm_gem_object_unreference(&obj->base);
- if (ret != 0)
- return ret;
+ if (avail == NULL)
+ avail = first;
return avail;
}
/**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
* @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: may the wait for the register to retire be interrupted by signals?
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -2421,52 +2491,119 @@ static int i915_find_fence_reg(struct drm_device *dev,
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg = NULL;
- struct intel_ring_buffer *pipelined = NULL;
+ struct drm_i915_fence_reg *reg;
int ret;
- /* Just update our place in the LRU if our fence is getting used. */
+ /* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (!pipelined) {
+ if (reg->setup_seqno) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ reg->setup_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ reg->setup_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ reg->setup_seqno = 0;
+ }
+ } else if (obj->last_fenced_ring &&
+ obj->last_fenced_ring != pipelined) {
+ ret = i915_gem_object_flush_fence(obj,
+ pipelined,
+ interruptible);
+ if (ret)
+ return ret;
+ } else if (obj->tiling_changed) {
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+ }
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+ BUG_ON(!pipelined && reg->setup_seqno);
+
+ if (obj->tiling_changed) {
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(dev, pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+ goto update;
+ }
+
return 0;
}
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- WARN(1, "allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (!obj->stride)
- return -EINVAL;
- WARN((obj->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj->gtt_offset);
- break;
- case I915_TILING_Y:
- if (!obj->stride)
- return -EINVAL;
- WARN((obj->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj->gtt_offset);
- break;
- }
+ reg = i915_find_fence_reg(dev, pipelined);
+ if (reg == NULL)
+ return -ENOSPC;
- ret = i915_find_fence_reg(dev, interruptible);
- if (ret < 0)
+ ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ if (ret)
return ret;
- obj->fence_reg = ret;
- reg = &dev_priv->fence_regs[obj->fence_reg];
- list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ drm_gem_object_reference(&old->base);
+
+ if (old->tiling_mode)
+ i915_gem_release_mmap(old);
+
+ /* XXX The pipelined change over appears to be incoherent. */
+ ret = i915_gem_object_flush_fence(old,
+ NULL, //pipelined,
+ interruptible);
+ if (ret) {
+ drm_gem_object_unreference(&old->base);
+ return ret;
+ }
+
+ if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ old->fence_reg = I915_FENCE_REG_NONE;
+ old->last_fenced_ring = pipelined;
+ old->last_fenced_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+ drm_gem_object_unreference(&old->base);
+ } else if (obj->last_fenced_seqno == 0)
+ pipelined = NULL;
reg->obj = obj;
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ obj->fence_reg = reg - dev_priv->fence_regs;
+ obj->last_fenced_ring = pipelined;
+ reg->setup_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+ obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 6:
ret = sandybridge_write_fence_reg(obj, pipelined);
@@ -2497,87 +2634,34 @@ i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
* data structures in dev_priv and obj.
*/
static void
-i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg];
- uint32_t fence_reg;
+ uint32_t fence_reg = reg - dev_priv->fence_regs;
switch (INTEL_INFO(dev)->gen) {
case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
- (obj->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
case 5:
case 4:
- I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
break;
case 3:
- if (obj->fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
+ if (fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
else
case 2:
- fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
I915_WRITE(fence_reg, 0);
break;
}
- reg->obj = NULL;
- obj->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
- bool interruptible)
-{
- struct drm_device *dev = obj->base.dev;
- int ret;
-
- if (obj->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
- /* On the i915, GPU access to tiled buffers is via a fence,
- * therefore we must wait for any outstanding access to complete
- * before clearing the fence.
- */
- if (obj->fenced_gpu_access) {
- i915_gem_object_flush_gpu_write_domain(obj);
- obj->fenced_gpu_access = false;
- }
-
- if (obj->last_fenced_seqno) {
- ret = i915_do_wait_request(dev,
- obj->last_fenced_seqno,
- interruptible,
- obj->last_fenced_ring);
- if (ret)
- return ret;
-
- obj->last_fenced_seqno = false;
- }
-
- i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg(obj);
-
- return 0;
+ reg->obj = NULL;
+ reg->setup_seqno = 0;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index af01a58a643b..9bdc495e17bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -424,7 +424,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
}
static int
-i915_gem_execbuffer_reserve(struct drm_device *dev,
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects,
struct drm_i915_gem_exec_object2 *exec)
@@ -499,10 +499,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
}
if (need_fence) {
- ret = i915_gem_object_get_fence_reg(obj, true);
+ ret = i915_gem_object_get_fence(obj, ring, 1);
+ if (ret)
+ break;
+ } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ /* XXX pipelined! */
+ ret = i915_gem_object_put_fence(obj);
if (ret)
break;
-
}
obj->pending_fenced_gpu_access = need_fence;
@@ -522,7 +527,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
/* First attempt, just clear anything that is purgeable.
* Second attempt, clear the entire GTT.
*/
- ret = i915_gem_evict_everything(dev, retry == 0);
+ ret = i915_gem_evict_everything(ring->dev, retry == 0);
if (ret)
return ret;
@@ -548,6 +553,7 @@ err:
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
+ struct intel_ring_buffer *ring,
struct list_head *objects,
struct drm_i915_gem_exec_object2 *exec,
int count)
@@ -590,7 +596,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
}
- ret = i915_gem_execbuffer_reserve(dev, file, objects, exec);
+ ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
if (ret)
goto err;
@@ -930,7 +936,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(dev, file, &objects, exec);
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
if (ret)
goto err;
@@ -938,7 +944,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_execbuffer_relocate(dev, file, &objects, exec);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file,
+ ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1c5fdb30f272..22a32b9932c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -244,9 +244,6 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (!obj->gtt_space)
- return true;
-
if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
@@ -345,27 +342,21 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
- if (!i915_gem_object_fence_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
- else if (obj->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj, true);
- else
- i915_gem_release_mmap(obj);
+ i915_gem_release_mmap(obj);
- if (ret != 0) {
- args->tiling_mode = obj->tiling_mode;
- args->stride = obj->stride;
- goto err;
- }
+ obj->map_and_fenceable =
+ obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
+ obj->tiling_changed = true;
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
}
-err:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c2c94a26f92e..e141dd2e46e5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1474,7 +1474,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* a fence as the cost is not that onerous.
*/
if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, false);
+ ret = i915_gem_object_get_fence(obj, pipelined, false);
if (ret)
goto err_unpin;
}
@@ -4370,6 +4370,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
@@ -4382,6 +4388,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
@@ -4966,6 +4978,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
i915_gem_object_unpin(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
+
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -5009,10 +5022,12 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
obj = work->old_fb_obj;
+
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
+
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index af715cc03ee0..d0c1add393a3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -787,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
goto out_unpin;
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
if (!overlay->active) {
regs = intel_overlay_map_regs(overlay);
if (!regs) {
@@ -1161,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = intel_overlay_recover_from_interrupt(overlay, true);
if (ret != 0)
goto out_unlock;
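
The display and overlay hunks above make explicit that cursor and overlay buffers must be untiled and must not sit behind a stale fence before the hardware scans them out. A reader's sketch of that pattern (hypothetical helper, not in the patch):

/* Hypothetical helper combining the cursor and overlay checks above:
 * reject tiled buffers, pin for display, then drop any old fence so the
 * hardware does not access the object through a stale detiling fence.
 */
static int example_pin_untiled_for_display(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode != I915_TILING_NONE)
		return -EINVAL;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		i915_gem_object_unpin(obj);

	return ret;
}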