Skip to content
This repository was archived by the owner on Jan 13, 2024. It is now read-only.

Commit d037a27

Browse files
Sebastian Andrzej Siewior authored and Steven Rostedt committed
rtmutex: use a trylock for waiter lock in trylock
Mike Galbraith captured the following: | >hardkernel#11 [ffff88017b243e90] _raw_spin_lock at ffffffff815d2596 | >hardkernel#12 [ffff88017b243e90] rt_mutex_trylock at ffffffff815d15be | >hardkernel#13 [ffff88017b243eb0] get_next_timer_interrupt at ffffffff81063b42 | >hardkernel#14 [ffff88017b243f00] tick_nohz_stop_sched_tick at ffffffff810bd1fd | >hardkernel#15 [ffff88017b243f70] tick_nohz_irq_exit at ffffffff810bd7d2 | >hardkernel#16 [ffff88017b243f90] irq_exit at ffffffff8105b02d | >hardkernel#17 [ffff88017b243fb0] reschedule_interrupt at ffffffff815db3dd | >--- <IRQ stack> --- | >hardkernel#18 [ffff88017a2a9bc8] reschedule_interrupt at ffffffff815db3dd | > [exception RIP: task_blocks_on_rt_mutex+51] | >hardkernel#19 [ffff88017a2a9ce0] rt_spin_lock_slowlock at ffffffff815d183c | >hardkernel#20 [ffff88017a2a9da0] lock_timer_base.isra.35 at ffffffff81061cbf | >hardkernel#21 [ffff88017a2a9dd0] schedule_timeout at ffffffff815cf1ce | >hardkernel#22 [ffff88017a2a9e50] rcu_gp_kthread at ffffffff810f9bbb | >hardkernel#23 [ffff88017a2a9ed0] kthread at ffffffff810796d5 | >hardkernel#24 [ffff88017a2a9f50] ret_from_fork at ffffffff815da04c lock_timer_base() does a try_lock() which deadlocks on the waiter lock, not the lock itself. This patch takes the waiter_lock with trylock so it should work from interrupt context as well. If the fastpath doesn't work and the waiter_lock itself is taken, then it seems that the lock itself is taken. This patch also adds "rt_spin_unlock_after_trylock_in_irq" to keep lockdep happy. If we managed to take the wait_lock in the first place, we should also be able to take it in the unlock path. Cc: stable-rt@vger.kernel.org Reported-by: Mike Galbraith <bitbucket@online.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 parent cb40dc0 commit d037a27

3 files changed

Lines changed: 29 additions & 5 deletions

File tree

include/linux/spinlock_rt.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -22,6 +22,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
2222
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
2323
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
2424
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
25+
extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
2526
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
2627
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
2728
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);

kernel/rtmutex.c

Lines changed: 27 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -801,10 +801,8 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
801801
/*
802802
* Slow path to release a rt_mutex spin_lock style
803803
*/
804-
static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
804+
static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
805805
{
806-
raw_spin_lock(&lock->wait_lock);
807-
808806
debug_rt_mutex_unlock(lock);
809807

810808
rt_mutex_deadlock_account_unlock(current);
@@ -823,6 +821,23 @@ static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
823821
rt_mutex_adjust_prio(current);
824822
}
825823

824+
static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
825+
{
826+
raw_spin_lock(&lock->wait_lock);
827+
__rt_spin_lock_slowunlock(lock);
828+
}
829+
830+
static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
831+
{
832+
int ret;
833+
834+
do {
835+
ret = raw_spin_trylock(&lock->wait_lock);
836+
} while (!ret);
837+
838+
__rt_spin_lock_slowunlock(lock);
839+
}
840+
826841
void __lockfunc rt_spin_lock(spinlock_t *lock)
827842
{
828843
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -853,6 +868,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
853868
}
854869
EXPORT_SYMBOL(rt_spin_unlock);
855870

871+
void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
872+
{
873+
/* NOTE: we always pass in '1' for nested, for simplicity */
874+
spin_release(&lock->dep_map, 1, _RET_IP_);
875+
rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
876+
}
877+
856878
void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
857879
{
858880
rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -1056,7 +1078,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
10561078
{
10571079
int ret = 0;
10581080

1059-
raw_spin_lock(&lock->wait_lock);
1081+
if (!raw_spin_trylock(&lock->wait_lock))
1082+
return ret;
10601083
init_lists(lock);
10611084

10621085
if (likely(rt_mutex_owner(lock) != current)) {

kernel/timer.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1398,7 +1398,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
13981398
expires = base->next_timer;
13991399
}
14001400
#ifdef CONFIG_PREEMPT_RT_FULL
1401-
rt_spin_unlock(&base->lock);
1401+
rt_spin_unlock_after_trylock_in_irq(&base->lock);
14021402
#else
14031403
spin_unlock(&base->lock);
14041404
#endif

0 commit comments

Comments
 (0)