Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c                   |   3
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c                |   1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c             |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                  |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                  |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                  |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c          | 409
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                  |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                  |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c                |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c          |  66
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c                  |   4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c                  |  46
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c         |  15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                    |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c   |   1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c    |   1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c  |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c  |   2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c  |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c           |  16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c            |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c            |   9
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c             |   7
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c                |   7
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c                  |  28
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c                  |   6
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                    |  28
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                    |   4
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c                |   9
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h                   |   7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c             |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h             |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c         |  33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c     |  16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c           |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c              |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c        |   2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c                   |   4
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c                   |   1
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c                 |   6
-rw-r--r--  drivers/gpu/vga/vgaarb.c                         |  46
42 files changed, 496 insertions, 361 deletions
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index a2cc6be97983..b792194e0d9c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
uint32_t data, jreg;
+ ast_open_key(ast);
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
}
ast->vga2_clone = false;
} else {
- ast->chip = 2000;
+ ast->chip = AST2000;
DRM_INFO("AST 2000 detected\n");
}
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9d7346b92653..6b7efcf363d6 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -250,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
+ drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index e1c5c3222129..c7c5a9d91fa0 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -555,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+ drm_connector_register(connector);
return connector;
}
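
Both the bochs and cirrus hunks add the same missing call: a connector is no longer
published implicitly, so the driver must call drm_connector_register() itself once the
connector is fully set up, or it never shows up in sysfs/userspace. A minimal sketch of
the resulting bring-up sequence (funcs names follow the cirrus hunk; error handling
omitted):

	drm_connector_init(dev, connector, &cirrus_vga_connector_funcs,
			   DRM_MODE_CONNECTOR_VGA);
	drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
	drm_connector_register(connector);	/* the one-liner both hunks add */
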
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
+ /*
+ * We enable some interrupt sources in our postinstall hooks, so mark
+ * interrupts as enabled _before_ actually enabling them to avoid
+ * special cases in our ordering checks.
+ */
+ dev_priv->pm._irqs_disabled = false;
+
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_gem_stolen;
- dev_priv->pm._irqs_disabled = false;
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
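
The reordering matters because the interrupt postinstall hook (run from inside
drm_irq_install()) already enables some interrupt sources, and i915's ordering checks
treat any enable as a bug while pm._irqs_disabled is still set. Schematically, with a
hypothetical assert helper standing in for those checks:

	/* Sketch only: the check the moved assignment has to satisfy. */
	static void assert_irqs_enabled(struct drm_i915_private *dev_priv)
	{
		WARN_ON(dev_priv->pm._irqs_disabled);
	}

	dev_priv->pm._irqs_disabled = false;		/* 1: mark enabled first ...   */
	ret = drm_irq_install(dev, dev->pdev->irq);	/* 2: ... so the postinstall
							 *    enables pass the check */
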
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a830eac5ba3..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
if ((1 << (domain)) & (mask))
struct drm_i915_private;
+struct i915_mm_struct;
struct i915_mmu_object;
enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
struct i915_gtt gtt; /* VM representing the global address space */
struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
- DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+ DECLARE_HASHTABLE(mm_structs, 7);
+ struct mutex mm_lock;
/* Kernel Modesetting */
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
- struct mm_struct *mm;
- struct i915_mmu_object *mn;
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
out:
switch (ret) {
case -EIO:
- /* If this -EIO is due to a gpu hang, give the reset code a
- * chance to clean up the mess. Otherwise return the proper
- * SIGBUS. */
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ /*
+ * We eat errors when the gpu is terminally wedged to avoid
+ * userspace unduly crashing (gl has no provisions for mmaps to
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
+ * and so needs to be reported.
+ */
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
ret = VM_FAULT_SIGBUS;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
#include <linux/mempolicy.h>
#include <linux/swap.h>
+struct i915_mm_struct {
+ struct mm_struct *mm;
+ struct drm_device *dev;
+ struct i915_mmu_notifier *mn;
+ struct hlist_node node;
+ struct kref kref;
+ struct work_struct work;
+};
+
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
struct mmu_notifier mn;
struct rb_root objects;
struct list_head linear;
- struct drm_device *dev;
- struct mm_struct *mm;
- struct work_struct work;
- unsigned long count;
unsigned long serial;
bool has_linear;
};
struct i915_mmu_object {
- struct i915_mmu_notifier *mmu;
+ struct i915_mmu_notifier *mn;
struct interval_tree_node it;
struct list_head link;
struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct i915_mmu_object *mmu;
+ struct i915_mmu_object *mo;
unsigned long serial;
restart:
serial = mn->serial;
- list_for_each_entry(mmu, &mn->linear, link) {
+ list_for_each_entry(mo, &mn->linear, link) {
struct drm_i915_gem_object *obj;
- if (mmu->it.last < start || mmu->it.start > end)
+ if (mo->it.last < start || mo->it.start > end)
continue;
- obj = mmu->obj;
+ obj = mo->obj;
drm_gem_object_reference(&obj->base);
spin_unlock(&mn->lock);
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
};
static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_mmu_notifier *mmu;
-
- /* Protected by dev->struct_mutex */
- hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
- if (mmu->mm == mm)
- return mmu;
-
- return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_mmu_notifier *mmu;
+ struct i915_mmu_notifier *mn;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
-
- mmu = __i915_mmu_notifier_lookup(dev, mm);
- if (mmu)
- return mmu;
-
- mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
- if (mmu == NULL)
+ mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+ if (mn == NULL)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&mmu->lock);
- mmu->dev = dev;
- mmu->mn.ops = &i915_gem_userptr_notifier;
- mmu->mm = mm;
- mmu->objects = RB_ROOT;
- mmu->count = 0;
- mmu->serial = 1;
- INIT_LIST_HEAD(&mmu->linear);
- mmu->has_linear = false;
-
- /* Protected by mmap_sem (write-lock) */
- ret = __mmu_notifier_register(&mmu->mn, mm);
+ spin_lock_init(&mn->lock);
+ mn->mn.ops = &i915_gem_userptr_notifier;
+ mn->objects = RB_ROOT;
+ mn->serial = 1;
+ INIT_LIST_HEAD(&mn->linear);
+ mn->has_linear = false;
+
+ /* Protected by mmap_sem (write-lock) */
+ ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
- kfree(mmu);
+ kfree(mn);
return ERR_PTR(ret);
}
- /* Protected by dev->struct_mutex */
- hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
- return mmu;
+ return mn;
}
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
- struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
- mmu_notifier_unregister(&mmu->mn, mmu->mm);
- kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
- lockdep_assert_held(&mmu->dev->struct_mutex);
-
- /* Protected by dev->struct_mutex */
- hash_del(&mmu->node);
-
- /* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
- * We enter the function holding struct_mutex, therefore we need
- * to drop our mutex prior to calling mmu_notifier_unregister in
- * order to prevent lock inversion (and system-wide deadlock)
- * between the mmap_sem and struct-mutex. Hence we defer the
- * unregistration to a workqueue where we hold no locks.
- */
- INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
- schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
- if (++mmu->serial == 0)
- mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
- struct i915_mmu_object *mn;
-
- list_for_each_entry(mn, &mmu->linear, link)
- if (mn->is_linear)
- return true;
-
- return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
- struct i915_mmu_object *mn)
-{
- lockdep_assert_held(&mmu->dev->struct_mutex);
-
- spin_lock(&mmu->lock);
- list_del(&mn->link);
- if (mn->is_linear)
- mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
- else
- interval_tree_remove(&mn->it, &mmu->objects);
- __i915_mmu_notifier_update_serial(mmu);
- spin_unlock(&mmu->lock);
-
- /* Protected against _add() by dev->struct_mutex */
- if (--mmu->count == 0)
- __i915_mmu_notifier_destroy(mmu);
+ if (++mn->serial == 0)
+ mn->serial = 1;
}
static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
- struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+ struct i915_mmu_notifier *mn,
+ struct i915_mmu_object *mo)
{
struct interval_tree_node *it;
int ret;
- ret = i915_mutex_lock_interruptible(mmu->dev);
+ ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
* remove the objects from the interval tree) before we do
* the check for overlapping objects.
*/
- i915_gem_retire_requests(mmu->dev);
+ i915_gem_retire_requests(dev);
- spin_lock(&mmu->lock);
- it = interval_tree_iter_first(&mmu->objects,
- mn->it.start, mn->it.last);
+ spin_lock(&mn->lock);
+ it = interval_tree_iter_first(&mn->objects,
+ mo->it.start, mo->it.last);
if (it) {
struct drm_i915_gem_object *obj;
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!obj->userptr.workers)
- mmu->has_linear = mn->is_linear = true;
+ mn->has_linear = mo->is_linear = true;
else
ret = -EAGAIN;
} else
- interval_tree_insert(&mn->it, &mmu->objects);
+ interval_tree_insert(&mo->it, &mn->objects);
if (ret == 0) {
- list_add(&mn->link, &mmu->linear);
- __i915_mmu_notifier_update_serial(mmu);
+ list_add(&mo->link, &mn->linear);
+ __i915_mmu_notifier_update_serial(mn);
}
- spin_unlock(&mmu->lock);
- mutex_unlock(&mmu->dev->struct_mutex);
+ spin_unlock(&mn->lock);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+ struct i915_mmu_object *mo;
+
+ list_for_each_entry(mo, &mn->linear, link)
+ if (mo->is_linear)
+ return true;
+
+ return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+ struct i915_mmu_object *mo)
+{
+ spin_lock(&mn->lock);
+ list_del(&mo->link);
+ if (mo->is_linear)
+ mn->has_linear = i915_mmu_notifier_has_linear(mn);
+ else
+ interval_tree_remove(&mo->it, &mn->objects);
+ __i915_mmu_notifier_update_serial(mn);
+ spin_unlock(&mn->lock);
+}
+
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_object *mn;
+ struct i915_mmu_object *mo;
- mn = obj->userptr.mn;
- if (mn == NULL)
+ mo = obj->userptr.mmu_object;
+ if (mo == NULL)
return;
- i915_mmu_notifier_del(mn->mmu, mn);
- obj->userptr.mn = NULL;
+ i915_mmu_notifier_del(mo->mn, mo);
+ kfree(mo);
+
+ obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+ if (mm->mn == NULL) {
+ down_write(&mm->mm->mmap_sem);
+ mutex_lock(&to_i915(mm->dev)->mm_lock);
+ if (mm->mn == NULL)
+ mm->mn = i915_mmu_notifier_create(mm->mm);
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ up_write(&mm->mm->mmap_sem);
+ }
+ return mm->mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
{
- struct i915_mmu_notifier *mmu;
- struct i915_mmu_object *mn;
+ struct i915_mmu_notifier *mn;
+ struct i915_mmu_object *mo;
int ret;
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- down_write(&obj->userptr.mm->mmap_sem);
- ret = i915_mutex_lock_interruptible(obj->base.dev);
- if (ret == 0) {
- mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
- if (!IS_ERR(mmu))
- mmu->count++; /* preemptive add to act as a refcount */
- else
- ret = PTR_ERR(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- }
- up_write(&obj->userptr.mm->mmap_sem);
- if (ret)
- return ret;
+ if (WARN_ON(obj->userptr.mm == NULL))
+ return -EINVAL;
- mn = kzalloc(sizeof(*mn), GFP_KERNEL);
- if (mn == NULL) {
- ret = -ENOMEM;
- goto destroy_mmu;
- }
+ mn = i915_mmu_notifier_find(obj->userptr.mm);
+ if (IS_ERR(mn))
+ return PTR_ERR(mn);
- mn->mmu = mmu;
- mn->it.start = obj->userptr.ptr;
- mn->it.last = mn->it.start + obj->base.size - 1;
- mn->obj = obj;
+ mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+ if (mo == NULL)
+ return -ENOMEM;
- ret = i915_mmu_notifier_add(mmu, mn);
- if (ret)
- goto free_mn;
+ mo->mn = mn;
+ mo->it.start = obj->userptr.ptr;
+ mo->it.last = mo->it.start + obj->base.size - 1;
+ mo->obj = obj;
- obj->userptr.mn = mn;
+ ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+ if (ret) {
+ kfree(mo);
+ return ret;
+ }
+
+ obj->userptr.mmu_object = mo;
return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ if (mn == NULL)
+ return;
-free_mn:
+ mmu_notifier_unregister(&mn->mn, mm);
kfree(mn);
-destroy_mmu:
- mutex_lock(&obj->base.dev->struct_mutex);
- if (--mmu->count == 0)
- __i915_mmu_notifier_destroy(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- return ret;
}
#else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return 0;
}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+}
+
#endif
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+ struct i915_mm_struct *mm;
+
+ /* Protected by dev_priv->mm_lock */
+ hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+ if (mm->mm == real)
+ return mm;
+
+ return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_mm_struct *mm;
+ int ret = 0;
+
+ /* During release of the GEM object we hold the struct_mutex. This
+ * precludes us from calling mmput() at that time as that may be
+ * the last reference and so call exit_mmap(). exit_mmap() will
+ * attempt to reap the vma, and if we were holding a GTT mmap
+ * would then call drm_gem_vm_close() and attempt to reacquire
+ * the struct mutex. So in order to avoid that recursion, we have
+ * to defer releasing the mm reference until after we drop the
+ * struct_mutex, i.e. we need to schedule a worker to do the clean
+ * up.
+ */
+ mutex_lock(&dev_priv->mm_lock);
+ mm = __i915_mm_struct_find(dev_priv, current->mm);
+ if (mm == NULL) {
+ mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (mm == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&mm->kref);
+ mm->dev = obj->base.dev;
+
+ mm->mm = current->mm;
+ atomic_inc(&current->mm->mm_count);
+
+ mm->mn = NULL;
+
+ /* Protected by dev_priv->mm_lock */
+ hash_add(dev_priv->mm_structs,
+ &mm->node, (unsigned long)mm->mm);
+ } else
+ kref_get(&mm->kref);
+
+ obj->userptr.mm = mm;
+out:
+ mutex_unlock(&dev_priv->mm_lock);
+ return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+ struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+ i915_mmu_notifier_free(mm->mn, mm->mm);
+ mmdrop(mm->mm);
+ kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+ struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+ /* Protected by dev_priv->mm_lock */
+ hash_del(&mm->node);
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+ INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+ schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+ if (obj->userptr.mm == NULL)
+ return;
+
+ kref_put_mutex(&obj->userptr.mm->kref,
+ __i915_mm_struct_free,
+ &to_i915(obj->base.dev)->mm_lock);
+ obj->userptr.mm = NULL;
+}
+
struct get_pages_work {
struct work_struct work;
struct drm_i915_gem_object *obj;
struct task_struct *task;
};
-
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec == NULL)
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
if (pvec != NULL) {
- struct mm_struct *mm = obj->userptr.mm;
+ struct mm_struct *mm = obj->userptr.mm->mm;
down_read(&mm->mmap_sem);
while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL;
pinned = 0;
- if (obj->userptr.mm == current->mm) {
+ if (obj->userptr.mm->mm == current->mm) {
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
i915_gem_userptr_release__mmu_notifier(obj);
-
- if (obj->userptr.mm) {
- mmput(obj->userptr.mm);
- obj->userptr.mm = NULL;
- }
+ i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
- if (obj->userptr.mn)
+ if (obj->userptr.mmu_object)
return 0;
return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return -ENODEV;
}
- /* Allocate the new object */
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
* at binding. This means that we need to hook into the mmu_notifier
* in order to detect if the mmu is destroyed.
*/
- ret = -ENOMEM;
- if ((obj->userptr.mm = get_task_mm(current)))
+ ret = i915_gem_userptr_init__mm_struct(obj);
+ if (ret == 0)
ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int
i915_gem_init_userptr(struct drm_device *dev)
{
-#if defined(CONFIG_MMU_NOTIFIER)
struct drm_i915_private *dev_priv = to_i915(dev);
- hash_init(dev_priv->mmu_notifiers);
-#endif
+ mutex_init(&dev_priv->mm_lock);
+ hash_init(dev_priv->mm_structs);
return 0;
}
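
The bulk of this patch replaces the per-object mm_struct pointer with a kref-counted
i915_mm_struct shared by all userptr objects of a process. The point of the indirection:
final release happens under struct_mutex, where mmput() is forbidden (it may run
exit_mmap(), reap a GTT mmap, and recurse back into drm_gem_vm_close()), so the mm is
pinned with an mm_count reference and freed from a worker. A condensed, non-compilable
sketch of that lifetime scheme, with lookup and notifier details elided:

	static int i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
	{
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		struct i915_mm_struct *mm;

		mutex_lock(&i915->mm_lock);
		mm = __i915_mm_struct_find(i915, current->mm);	/* hashed on the mm */
		if (mm == NULL) {
			mm = kmalloc(sizeof(*mm), GFP_KERNEL);
			kref_init(&mm->kref);
			mm->mm = current->mm;
			atomic_inc(&current->mm->mm_count); /* pin the struct, not the
							     * address space (no mmput) */
			hash_add(i915->mm_structs, &mm->node, (unsigned long)mm->mm);
		} else
			kref_get(&mm->kref);
		obj->userptr.mm = mm;
		mutex_unlock(&i915->mm_lock);
		return 0;
	}

	static void __i915_mm_struct_free(struct kref *kref)
	{
		struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

		hash_del(&mm->node);	/* kref_put_mutex() already holds mm_lock */
		mutex_unlock(&to_i915(mm->dev)->mm_lock);

		INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
		schedule_work(&mm->work); /* worker: unregister notifier, mmdrop(), kfree() */
	}
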
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4d7607da2c4..f29b44c86a2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,16 +334,20 @@
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_A (2<<20)
+#define BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
#define BLT_DEPTH_8 (0<<24)
#define BLT_DEPTH_16_565 (1<<24)
#define BLT_DEPTH_16_1555 (2<<24)
#define BLT_DEPTH_32 (3<<24)
-#define BLT_ROP_GXCOPY (0xcc<<16)
+#define BLT_ROP_SRC_COPY (0xcc<<16)
+#define BLT_ROP_COLOR_COPY (0xf0<<16)
#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 81d7681faa63..fdff1d420c14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+ tmp & DP_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
pipe_config->has_dp_encoder = true;
intel_dp_get_m_n(crtc, pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f9151f6641d9..ca34de7f6a7b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp, flags = 0;
int dotclock;
@@ -734,6 +735,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_audio = true;
+ if (!HAS_PCH_SPLIT(dev) &&
+ tmp & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
pipe_config->adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..47a126a0493f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
+ u32 cs_offset = ring->scratch.gtt_offset;
int ret;
- if (flags & I915_DISPATCH_PINNED) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- } else {
- u32 cs_offset = ring->scratch.gtt_offset;
+ /* Evict the invalid PTE TLBs */
+ intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0xdeadbeef);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ if ((flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(ring, 9+3);
+ ret = intel_ring_begin(ring, 6 + 2);
if (ret)
return ret;
- /* Blit the batch (which has now all relocs applied) to the stable batch
- * scratch bo area (so that the CS never stumbles over its tlb
- * invalidation bug) ... */
- intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
- XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+ /* Blit the batch (which has now all relocs applied) to the
+ * stable batch scratch bo area (so that the CS never
+ * stumbles over its tlb invalidation bug) ...
+ */
+ intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+ intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0);
intel_ring_emit(ring, 4096);
intel_ring_emit(ring, offset);
+
intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
/* ... and execute it. */
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, cs_offset + len - 8);
- intel_ring_advance(ring);
+ offset = cs_offset;
}
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
return 0;
}
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
return -ENOMEM;
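
After this change every i830 batch takes the same three steps: a COLOR_BLT that writes
across I830_TLB_ENTRIES scratch pages to force valid PTEs into the broken CS TLB, an
optional SRC_COPY of the batch into the scratch bo when it is not pinned, and a single
MI_BATCH_BUFFER dispatch. The control flow, condensed (the emit_* helpers are
hypothetical shorthand for the intel_ring_emit() runs in the hunk above):

	static int i830_dispatch(struct intel_engine_cs *ring,
				 u64 offset, u32 len, unsigned flags)
	{
		u32 cs_offset = ring->scratch.gtt_offset;

		emit_tlb_evict_blt(ring, cs_offset);	/* always, pinned or not */

		if (!(flags & I915_DISPATCH_PINNED)) {
			if (len > I830_BATCH_LIMIT)
				return -ENOSPC;
			emit_copy_to_scratch(ring, cs_offset, offset, len);
			offset = cs_offset;		/* execute the stable copy */
		}

		return emit_batch_buffer_start(ring, offset, len, flags);
	}
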
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c69d3ce1b3d6..c14341ca3ef9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+ intel_wait_for_vblank(encoder->base.dev,
+ to_intel_crtc(encoder->base.crtc)->pipe);
+
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a125a7e32742..c6c9b02e0ada 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
priv->hdmi_pdev = pdev;
}
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+ char name2[32];
+ snprintf(name2, sizeof(name2), "%s-gpio", name);
+ gpio = of_get_named_gpio(of_node, name2, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+ }
+ return gpio;
+}
+#endif
+
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
- int get_gpio(const char *name)
- {
- int gpio = of_get_named_gpio(of_node, name, 0);
- if (gpio < 0) {
- char name2[32];
- snprintf(name2, sizeof(name2), "%s-gpio", name);
- gpio = of_get_named_gpio(of_node, name2, 0);
- if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
- gpio = -1;
- }
- }
- return gpio;
- }
-
if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
config.mmio_name = "core_physical";
- config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
- config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
- config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
- config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
- config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
- config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
+ config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+ config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+ config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+ config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+ config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+ config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
static const char *hpd_clk_names[] = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 902d7685d441..f408b69486a8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,19 +15,25 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifdef CONFIG_COMMON_CLK
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#endif
#include "hdmi.h"
struct hdmi_phy_8960 {
struct hdmi_phy base;
struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
struct clk_hw pll_hw;
struct clk *pll;
unsigned long pixclk;
+#endif
};
#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
/*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
};
-
+#endif
/*
* HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
{
struct hdmi_phy_8960 *phy_8960;
struct hdmi_phy *phy = NULL;
- int ret, i;
+ int ret;
+#ifdef CONFIG_COMMON_CLK
+ int i;
/* sanity check: */
for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
return ERR_PTR(-EINVAL);
+#endif
phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->hdmi = hdmi;
+#ifdef CONFIG_COMMON_CLK
phy_8960->pll_hw.init = &pll_init;
phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->pll = NULL;
goto fail;
}
+#endif
return phy;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 26ee80db17af..fcf95680413d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
#define reglog 0
#endif
-static char *vram;
+static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 0a44459844e3..05a278bab247 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object)
nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
- nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
if (priv->bar[0].mem)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index b19a2b3c1081..32f28dc73ef2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object)
if (priv->r100c10_page)
nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+ nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index b54b582e72c4..d5d65285efe5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -98,6 +98,7 @@ static int
gf100_ltc_init(struct nouveau_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
+ u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
int ret;
ret = nvkm_ltc_init(priv);
@@ -107,6 +108,7 @@ gf100_ltc_init(struct nouveau_object *object)
nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
index ea716569745d..b39b5d0eb8f9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
@@ -28,6 +28,7 @@ static int
gk104_ltc_init(struct nouveau_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
+ u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
int ret;
ret = nvkm_ltc_init(priv);
@@ -37,6 +38,7 @@ gk104_ltc_init(struct nouveau_object *object)
nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
nv_wr32(priv, 0x17e000, priv->ltc_nr);
nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index 4761b2e9af00..a4de64289762 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -98,6 +98,7 @@ static int
gm107_ltc_init(struct nouveau_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
+ u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
int ret;
ret = nvkm_ltc_init(priv);
@@ -106,6 +107,7 @@ gm107_ltc_init(struct nouveau_object *object)
nv_wr32(priv, 0x17e27c, priv->ltc_nr);
nv_wr32(priv, 0x17e278, priv->tag_base);
+ nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 279206997e5c..622424692b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -46,7 +46,6 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
- acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -222,10 +221,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM")) {
- nouveau_dsm_priv.other_handle = dhandle;
+ if (!acpi_has_method(dhandle, "_DSM"))
return false;
- }
+
if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -301,16 +299,6 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
- /*
- * On some systems hotplug events are generated for the device
- * being switched off when _DSM is executed. They cause ACPI
- * hotplug to trigger and attempt to remove the device from
- * the system, which causes it to break down. Prevent that from
- * happening by setting the no_hotplug flag for the involved
- * ACPI device objects.
- */
- acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
- acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 250a5e88c751..9c3af96a7153 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -627,6 +627,7 @@ int nouveau_pmops_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
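
This pairs with the removal of the acpi_bus_no_hotplug() calls from nouveau_acpi.c
(and from radeon_atpx_handler.c below): instead of a global ACPI opt-out, the driver
now tells the PCI core per device to ignore the spurious hotplug events that executing
_DSM/ATPX generates for the powered-down GPU. The suspend sequence both drivers use in
this series:

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_ignore_hotplug(pdev);		/* drop bogus removal events while off */
	pci_set_power_state(pdev, PCI_D3hot);	/* radeon's runtime path uses D3cold */
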
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 18d55d447248..c7592ec8ecb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -108,7 +108,16 @@ void
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
+ bool runtime = false;
+
+ if (nouveau_runtime_pm == 1)
+ runtime = true;
+ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+ runtime = true;
+
vga_switcheroo_unregister_client(dev->pdev);
+ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+ vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b1e11f8434e2..ac14b67621d3 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
u8 msg[DP_DPCD_SIZE];
int ret;
- char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
radeon_dp_probe_oui(radeon_connector);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 192278bc993c..c4ffa54b1e3d 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
{
int r;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- RREG32(SRBM_SOFT_RESET);
-
r = cik_sdma_load_microcode(rdev);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 8b58e11b64fa..67cb472d188c 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -33,6 +33,8 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+ bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
{
kv_smc_bapm_enable(rdev, false);
+ if (rdev->family == CHIP_MULLINS)
+ kv_enable_nb_dpm(rdev, false);
+
/* powerup blocks */
kv_dpm_powergate_acp(rdev, false);
kv_dpm_powergate_samu(rdev, false);
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
return ret;
}
-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+ bool enable)
{
struct kv_power_info *pi = kv_get_pi(rdev);
int ret = 0;
- if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
- ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
- if (ret == 0)
- pi->nb_dpm_enabled = true;
+ if (enable) {
+ if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = true;
+ }
+ } else {
+ if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = false;
+ }
}
return ret;
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
kv_update_sclk_t(rdev);
if (rdev->family == CHIP_MULLINS)
- kv_enable_nb_dpm(rdev);
+ kv_enable_nb_dpm(rdev, true);
}
} else {
if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
kv_update_acp_boot_level(rdev);
kv_update_sclk_t(rdev);
- kv_enable_nb_dpm(rdev);
+ kv_enable_nb_dpm(rdev, true);
}
}
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 8a3e6221cece..f26f0a9fb522 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
u32 reg_offset, wb_offset;
int i, r;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
for (i = 0; i < 2; i++) {
if (i == 0) {
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4c5ec44ff328..b0098e792e62 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today caller are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
(void)RREG32(RADEON_CP_RB_WPTR);
}
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e616eb5f6e7a..3cfb50056f7a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
- /* PFP_SYNC_ME packet only exists on 7xx+ */
- if (emit_wait && (rdev->family >= CHIP_RV770)) {
+ /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+ if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
/* Prevent the PFP from running ahead of the semaphore wait */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 51fd98553eaf..a908daa006d2 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 0c4a7d8d93e0..31e1052ad3e3 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -44,13 +44,6 @@
#define R6XX_MAX_PIPES 8
#define R6XX_MAX_PIPES_MASK 0xff
-/* PTE flags */
-#define PTE_VALID (1 << 0)
-#define PTE_SYSTEM (1 << 1)
-#define PTE_SNOOPED (1 << 2)
-#define PTE_READABLE (1 << 5)
-#define PTE_WRITEABLE (1 << 6)
-
/* tiling bits */
#define ARRAY_LINEAR_GENERAL 0x00000000
#define ARRAY_LINEAR_ALIGNED 0x00000001
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eeeeabe09758..2dd5847f9b98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
- .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
- .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r300_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 275a5dc01780..7756bc1e1cd3 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
- struct radeon_ring *ring);
+
/*
* r200,rv250,rs300,rv280
*/
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 92b2d8dd4735..e74c7e387dde 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+ if ((dev->pdev->device == 0x9805) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+ return false;
+ }
return true;
}
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index a9fb0d016d38..8bc7d0bbd3c8 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,7 +33,6 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
- acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -453,10 +452,9 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
- if (ACPI_FAILURE(status)) {
- radeon_atpx_priv.other_handle = dhandle;
+ if (ACPI_FAILURE(status))
return false;
- }
+
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -540,16 +538,6 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
- /*
- * On some systems hotplug events are generated for the device
- * being switched off when ATPX is executed. They cause ACPI
- * hotplug to trigger and attempt to remove the device from
- * the system, which causes it to break down. Prevent that from
- * happening by setting the no_hotplug flag for the involved
- * ACPI device objects.
- */
- acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
- acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6a219bcee66d..75223dd3a8a3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1393,7 +1393,7 @@ int radeon_device_init(struct radeon_device *rdev,
r = radeon_init(rdev);
if (r)
- return r;
+ goto failed;
r = radeon_ib_ring_tests(rdev);
if (r)
@@ -1413,7 +1413,7 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
r = radeon_init(rdev);
if (r)
- return r;
+ goto failed;
}
if ((radeon_testing & 1)) {
@@ -1435,6 +1435,11 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
+
+failed:
+ if (runtime)
+ vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+ return r;
}
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1455,6 +1460,8 @@ void radeon_device_fini(struct radeon_device *rdev)
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
vga_switcheroo_unregister_client(rdev->pdev);
+ if (rdev->flags & RADEON_IS_PX)
+ vga_switcheroo_fini_domain_pm_ops(rdev->dev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
pci_iounmap(rdev->pdev, rdev->rio_mem);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8df888908833..4126fd0937a2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -83,7 +83,7 @@
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
* 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- * CS to GPU
+ * CS to GPU on >= r600
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 40
@@ -440,6 +440,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
ret = radeon_suspend_kms(drm_dev, false, false);
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 56d9fd66d8ae..abd6753a570a 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,7 +34,7 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- uint32_t *cpu_addr;
+ uint64_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 6c1fc339d228..c5799f16aa4b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
if (flags & RADEON_GART_PAGE_READ)
- addr |= RS400_PTE_READABLE;
+ entry |= RS400_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
- addr |= RS400_PTE_WRITEABLE;
+ entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index ef93156a69c6..b22968c08d1f 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -298,7 +298,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
val = frame[0xC];
- val |= frame[0xD] << 8;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
/* Enable transmission slot for AVI infoframe
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 6866448083b2..37ac7b5dbd06 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
}
EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+ dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
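
The new export gives hybrid-graphics drivers a teardown counterpart to
vga_switcheroo_init_domain_pm_ops(); without it, dev->pm_domain is left dangling after
driver unload or a failed probe. Expected pairing, as the radeon and nouveau hunks
above use it (sketch; ops/domain structures are the driver's own):

	/* probe / init */
	vga_switcheroo_register_client(pdev, &client_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(dev, &driver_pm_domain);

	/* teardown and error paths */
	vga_switcheroo_unregister_client(pdev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(dev);
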
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d2077f040f3e..77711623b973 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -41,6 +41,7 @@
#include <linux/poll.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
+#include <linux/screen_info.h>
#include <linux/uaccess.h>
@@ -112,10 +113,8 @@ both:
return 1;
}
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
/* this is only used as a cookie - it should not be dereferenced */
static struct pci_dev *vga_default;
-#endif
static void vga_arb_device_card_gone(struct pci_dev *pdev);
@@ -131,7 +130,6 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
}
/* Returns the default VGA device (vgacon's babe) */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
struct pci_dev *vga_default_device(void)
{
return vga_default;
@@ -147,7 +145,6 @@ void vga_set_default_device(struct pci_dev *pdev)
pci_dev_put(vga_default);
vga_default = pci_dev_get(pdev);
}
-#endif
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
{
@@ -583,11 +580,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Deal with VGA default device. Use first enabled one
* by default if arch doesn't have its own hook
*/
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == NULL &&
- ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+ ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
+ pr_info("vgaarb: setting as boot device: PCI:%s\n",
+ pci_name(pdev));
vga_set_default_device(pdev);
-#endif
+ }
vga_arbiter_check_bridge_sharing(vgadev);
@@ -621,10 +619,8 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
goto bail;
}
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == pdev)
vga_set_default_device(NULL);
-#endif
if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count--;
@@ -1320,6 +1316,38 @@ static int __init vga_arb_device_init(void)
pr_info("vgaarb: loaded\n");
list_for_each_entry(vgadev, &vga_list, list) {
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ /* Override I/O based detection done by vga_arbiter_add_pci_device()
+ * as it may take the wrong device (e.g. on Apple system under EFI).
+ *
+ * Select the device owning the boot framebuffer if there is one.
+ */
+ resource_size_t start, end;
+ int i;
+
+ /* Does firmware framebuffer belong to us? */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+ continue;
+
+ start = pci_resource_start(vgadev->pdev, i);
+ end = pci_resource_end(vgadev->pdev, i);
+
+ if (!start || !end)
+ continue;
+
+ if (screen_info.lfb_base < start ||
+ (screen_info.lfb_base + screen_info.lfb_size) >= end)
+ continue;
+ if (!vga_default_device())
+ pr_info("vgaarb: setting as boot device: PCI:%s\n",
+ pci_name(vgadev->pdev));
+ else if (vgadev->pdev != vga_default_device())
+ pr_info("vgaarb: overriding boot device: PCI:%s\n",
+ pci_name(vgadev->pdev));
+ vga_set_default_device(vgadev->pdev);
+ }
+#endif
if (vgadev->bridge_has_one_vga)
pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
else