author     Lyude Paul <lyude@redhat.com>  2019-10-16 16:02:59 -0400
committer  Lyude Paul <lyude@redhat.com>  2019-10-24 14:24:40 -0400
commit     14692a3637d4f1cd8ccdb8c605222037b3ac3494
tree       02a8b470dc37b3fd382efcd84d55664d01b5c9e9  /drivers/gpu/drm/drm_dp_mst_topology.c
parent     9408cc94eb041d0c2f9f00189a613b94c0449450
drm/dp_mst: Add probe_lock
Currently, MST lacks locking in a lot of places that really should have some
sort of locking. Hotplugging and link address code paths are some of the
offenders here, as there is actually nothing preventing us from running a link
address probe while at the same time handling a connection status update
request - something that's likely always been possible but never seen in the
wild because hotplugging has been broken for ages now (with the exception of
amdgpu, for reasons I don't think are worth digging into very far).

Note: I'm going to start using the term "in-memory topology layout" here to
refer to drm_dp_mst_port->mstb and drm_dp_mst_branch->ports.

Locking in these places is a little tougher than it looks, though. Generally
we protect anything having to do with the in-memory topology layout under
&mgr->lock. But this becomes nearly impossible to do from the context of link
address probes, because &mgr->lock is usually grabbed under various
modesetting locks, meaning that there's no way we can just invert the
&mgr->lock order and keep it locked throughout the whole process of updating
the topology.

Luckily, there are only two workers which can modify the in-memory topology
layout: drm_dp_mst_up_req_work() and drm_dp_mst_link_probe_work(). As long as
we prevent these two workers from traversing the topology layout in parallel
with the intent of updating it, we don't need to worry about grabbing
&mgr->lock in these workers for reads. We only need to grab &mgr->lock in
these workers for writes, so that readers outside these two workers are still
protected from the topology layout changing beneath them.

So, add the new &mgr->probe_lock and use it in both
drm_dp_mst_link_probe_work() and drm_dp_mst_up_req_work(). Additionally, add
some more detailed explanations of how this locking is intended to work to
drm_dp_mst_port->mstb and drm_dp_mst_branch->ports.

Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Sean Paul <sean@poorly.run>
Cc: Juston Li <juston.li@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Harry Wentland <hwentlan@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20191022023641.8026-6-lyude@redhat.com
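For illustration, the following is a minimal kernel-style sketch of the locking
discipline described above; it is not taken from the patch, and the struct and
worker names are simplified stand-ins. Each topology-updating worker holds
&mgr->probe_lock for its entire walk of the topology, and only takes
&mgr->lock around actual writes to the in-memory topology layout.

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    /*
     * Simplified stand-in for drm_dp_mst_topology_mgr, showing only the two
     * locks discussed above. Names here are illustrative, not the driver's
     * real layout.
     */
    struct example_mst_mgr {
            struct mutex lock;        /* protects the in-memory topology layout */
            struct mutex probe_lock;  /* serializes the two topology-updating workers */
            struct work_struct link_probe_work;
            struct work_struct up_req_work;
    };

    /* Either topology-updating worker follows the same pattern. */
    static void example_topology_update_worker(struct work_struct *work)
    {
            struct example_mst_mgr *mgr =
                    container_of(work, struct example_mst_mgr, link_probe_work);

            /*
             * probe_lock is held across the whole walk: the other worker
             * cannot change the topology underneath us, so reads inside this
             * worker need no additional locking.
             */
            mutex_lock(&mgr->probe_lock);

            /* ... traverse the topology, send link address / path resource requests ... */

            /*
             * &mgr->lock is only taken around actual writes to port->mstb and
             * mstb->ports, so readers outside these two workers are still
             * protected from the layout changing beneath them.
             */
            mutex_lock(&mgr->lock);
            /* ... update the in-memory topology layout ... */
            mutex_unlock(&mgr->lock);

            mutex_unlock(&mgr->probe_lock);
    }

The reason this sidesteps the lock-ordering problem is that probe_lock is only
ever taken from these two workers, never under modesetting locks, so it can be
held across the entire probe while &mgr->lock keeps its existing, short-lived
usage.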
Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 1aaeeb884b9c..2f87dec31c3c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2147,37 +2147,40 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
- struct drm_dp_mst_branch *mstb_child;
+
if (!mstb->link_address_sent)
drm_dp_send_link_address(mgr, mstb);
list_for_each_entry(port, &mstb->ports, next) {
- if (port->input)
- continue;
+ struct drm_dp_mst_branch *mstb_child = NULL;
- if (!port->ddps)
+ if (port->input || !port->ddps)
continue;
if (!port->available_pbn)
drm_dp_send_enum_path_resources(mgr, mstb, port);
- if (port->mstb) {
+ if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
- if (mstb_child) {
- drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- }
+
+ if (mstb_child) {
+ drm_dp_check_and_send_link_address(mgr, mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
}
}
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ mutex_lock(&mgr->probe_lock);
+
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
@@ -2190,6 +2193,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_mst_topology_put_mstb(mstb);
}
+ mutex_unlock(&mgr->probe_lock);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -3313,6 +3317,7 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
up_req_work);
struct drm_dp_pending_up_req *up_req;
+ mutex_lock(&mgr->probe_lock);
while (true) {
mutex_lock(&mgr->up_req_lock);
up_req = list_first_entry_or_null(&mgr->up_req_list,
@@ -3328,6 +3333,7 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
drm_dp_mst_process_up_req(mgr, up_req);
kfree(up_req);
}
+ mutex_unlock(&mgr->probe_lock);
}
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
@@ -4352,6 +4358,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->payload_lock);
mutex_init(&mgr->delayed_destroy_lock);
mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_port_list);
INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
@@ -4417,6 +4424,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mutex_destroy(&mgr->qlock);
mutex_destroy(&mgr->lock);
mutex_destroy(&mgr->up_req_lock);
+ mutex_destroy(&mgr->probe_lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);