author	Andrew Morton <akpm@osdl.org>	2006-01-31 16:34:26 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-31 16:49:43 -0800
commit	3fa97c9db4f6f93f41f7a40d08872dbfd8dc907e (patch)
tree	6a71aea65a6854f6ab16dc2f8cadf885f01b927e
parent	b8c475be7bf9b79e6417c08d7a921b2e8cb04258 (diff)
[PATCH] "Fix uidhash_lock <-> RCU deadlock" fix
I get storms of warnings from local_bh_enable().  Better-tested patches,
please.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 kernel/user.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)
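For context on the warning storms: local_bh_enable() may run pending softirqs, and the bh-enable path warns when entered with hardirqs off, because a softirq handler is free to re-enable interrupts behind the caller's back. Below is a minimal sketch of the broken shape the patch removes; buggy_free_path() and caller_with_irqs_off() are illustrative names standing in for the old free_uid() and its irqs-off callers (kernel context assumed, not a standalone program):

#include <linux/interrupt.h>
#include <linux/irqflags.h>

/* Illustrative only: mirrors the old free_uid() locking shape. */
static void buggy_free_path(void)
{
	local_bh_disable();
	/* ... take uidhash_lock, drop the refcount ... */
	local_bh_enable();	/* may run softirq callbacks right here */
}

static void caller_with_irqs_off(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* interrupts now off */
	buggy_free_path();	/* local_bh_enable() warns: softirqs must
				 * not run with irqs disabled, and a
				 * handler could enable them behind us */
	local_irq_restore(flags);
}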
diff --git a/kernel/user.c b/kernel/user.c
index d1ae2349347e..d9deae43a9ab 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -33,6 +33,10 @@ static struct list_head uidhash_table[UIDHASH_SZ];
  * The uidhash_lock is mostly taken from process context, but it is
  * occasionally also taken from softirq/tasklet context, when
  * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
@@ -89,16 +93,19 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
 struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock_bh(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -106,7 +113,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
-	local_bh_enable();
+	local_irq_restore(flags);
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -114,9 +121,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock_bh(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -146,7 +153,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock_bh(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -156,7 +163,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock_bh(&uidhash_lock);
+		spin_unlock_irq(&uidhash_lock);
 	}
 
 	return up;
@@ -192,9 +199,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock_bh(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 	return 0;
 }
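The resulting locking pattern, sketched minimally below: paths that may be entered with interrupts in an unknown state (find_user(), free_uid()) use the saving/restoring variants, while paths known to run in process context with interrupts enabled (alloc_uid(), the init call) can use the cheaper unconditional spin_lock_irq()/spin_unlock_irq() pair. example_lock and the two function names are illustrative, not from the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stands in for uidhash_lock */

/* Callable with interrupts on or off: the irq state is saved/restored. */
static void irq_state_unknown(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... walk or modify the hash table ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Process context only: interrupts known to be enabled on entry. */
static void process_context_only(void)
{
	spin_lock_irq(&example_lock);
	/* ... walk or modify the hash table ... */
	spin_unlock_irq(&example_lock);
}

Note also that free_uid() keeps the atomic_dec_and_lock() idiom: the spinlock is taken only when the reference count actually drops to zero, so the common put stays lock-free apart from the interrupt disable.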