Skip to content

Commit 6624d17

Browse files
Sebastian Andrzej Siewior authored and Sasha Levin committed
net: Provide a PREEMPT_RT specific check for netdev_queue::_xmit_lock
[ Upstream commit b824c3e ] After acquiring netdev_queue::_xmit_lock the number of the CPU owning the lock is recorded in netdev_queue::xmit_lock_owner. This works as long as the BH context is not preemptible. On PREEMPT_RT the softirq context is preemptible and without the softirq-lock it is possible to have multiple users in __dev_queue_xmit() submitting a skb on the same CPU. This is fine in general but it also means that the current CPU is recorded as netdev_queue::xmit_lock_owner. This in turn leads to the recursion alert and the skb is dropped. Instead of checking for the CPU number that owns the lock, PREEMPT_RT can check if the lock owner matches the current task. Add netif_tx_owned() which returns true if the current context owns the lock by comparing the provided CPU number with the recorded number. This resembles the current check by negating the condition (the current check returns true if the lock is not owned). On PREEMPT_RT use rt_mutex_owner() to return the lock owner and compare the current task against it. Use the new helper in __dev_queue_xmit() and netif_local_xmit_active() which provides a similar check. Update comments regarding pairing READ_ONCE(). Reported-by: Bert Karwatzki <spasswolf@web.de> Closes: https://lore.kernel.org/all/20260216134333.412332-1-spasswolf@web.de Fixes: 3253cb4 ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT") Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reported-by: Bert Karwatzki <spasswolf@web.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Link: https://patch.msgid.link/20260302162631.uGUyIqDT@linutronix.de Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent c3e8c75 commit 6624d17

3 files changed

Lines changed: 24 additions & 10 deletions

File tree

include/linux/netdevice.h

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4684,7 +4684,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
46844684
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
46854685
{
46864686
spin_lock(&txq->_xmit_lock);
4687-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4687+
/* Pairs with READ_ONCE() in netif_tx_owned() */
46884688
WRITE_ONCE(txq->xmit_lock_owner, cpu);
46894689
}
46904690

@@ -4702,7 +4702,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
47024702
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
47034703
{
47044704
spin_lock_bh(&txq->_xmit_lock);
4705-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4705+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47064706
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
47074707
}
47084708

@@ -4711,22 +4711,22 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
47114711
bool ok = spin_trylock(&txq->_xmit_lock);
47124712

47134713
if (likely(ok)) {
4714-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4714+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47154715
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
47164716
}
47174717
return ok;
47184718
}
47194719

47204720
static inline void __netif_tx_unlock(struct netdev_queue *txq)
47214721
{
4722-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4722+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47234723
WRITE_ONCE(txq->xmit_lock_owner, -1);
47244724
spin_unlock(&txq->_xmit_lock);
47254725
}
47264726

47274727
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
47284728
{
4729-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4729+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47304730
WRITE_ONCE(txq->xmit_lock_owner, -1);
47314731
spin_unlock_bh(&txq->_xmit_lock);
47324732
}
@@ -4819,6 +4819,23 @@ static inline void netif_tx_disable(struct net_device *dev)
48194819
local_bh_enable();
48204820
}
48214821

4822+
#ifndef CONFIG_PREEMPT_RT
4823+
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4824+
{
4825+
/* Other cpus might concurrently change txq->xmit_lock_owner
4826+
* to -1 or to their cpu id, but not to our id.
4827+
*/
4828+
return READ_ONCE(txq->xmit_lock_owner) == cpu;
4829+
}
4830+
4831+
#else
4832+
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4833+
{
4834+
return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
4835+
}
4836+
4837+
#endif
4838+
48224839
static inline void netif_addr_lock(struct net_device *dev)
48234840
{
48244841
unsigned char nest_level = 0;

net/core/dev.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4758,10 +4758,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
47584758
if (dev->flags & IFF_UP) {
47594759
int cpu = smp_processor_id(); /* ok because BHs are off */
47604760

4761-
/* Other cpus might concurrently change txq->xmit_lock_owner
4762-
* to -1 or to their cpu id, but not to our id.
4763-
*/
4764-
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4761+
if (!netif_tx_owned(txq, cpu)) {
47654762
bool is_list = false;
47664763

47674764
if (dev_xmit_recursion())

net/core/netpoll.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ static int netif_local_xmit_active(struct net_device *dev)
132132
for (i = 0; i < dev->num_tx_queues; i++) {
133133
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
134134

135-
if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
135+
if (netif_tx_owned(txq, smp_processor_id()))
136136
return 1;
137137
}
138138

0 commit comments

Comments
 (0)