tcp: annotate lockless access to tcp_memory_pressure
author Eric Dumazet <edumazet@google.com>
Wed, 9 Oct 2019 22:10:15 +0000 (15:10 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 27 Jan 2020 13:46:50 +0000 (14:46 +0100)
[ Upstream commit 1f142c17d19a5618d5a633195a46f2c8be9bf232 ]

tcp_memory_pressure is read without holding any lock,
and its value could be changed on other cpus.

Use READ_ONCE() to annotate these lockless reads.

The write side is already using atomic ops.
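For context, the read/write pattern this patch annotates can be sketched in
plain C. This is a minimal illustration only, assuming a simplified
READ_ONCE() (the kernel's real macro is more elaborate); the names
memory_pressure, enter_pressure() and leave_pressure() are illustrative
stand-ins for tcp_memory_pressure and its helpers, not kernel code:

    #include <stdio.h>

    /* Simplified stand-in for the kernel's READ_ONCE(): the volatile
     * access forces a single, non-torn load and keeps the compiler from
     * caching or refetching the value. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static unsigned long memory_pressure;    /* analogue of tcp_memory_pressure */

    static void enter_pressure(unsigned long now)
    {
            unsigned long expected = 0;

            /* Lockless read: another CPU may flip the flag at any time. */
            if (READ_ONCE(memory_pressure))
                    return;
            /* Write side uses an atomic RMW op, mirroring cmpxchg(). */
            __atomic_compare_exchange_n(&memory_pressure, &expected, now, 0,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    }

    static void leave_pressure(void)
    {
            unsigned long val;

            if (!READ_ONCE(memory_pressure))
                    return;
            /* Atomic exchange, mirroring xchg() in tcp_leave_memory_pressure(). */
            val = __atomic_exchange_n(&memory_pressure, 0, __ATOMIC_RELAXED);
            if (val)
                    printf("pressure was set at tick %lu\n", val);
    }

    int main(void)
    {
            enter_pressure(1000);
            leave_pressure();
            return 0;
    }

The READ_ONCE() annotations do not add barriers; they only make the lockless
reads explicit and prevent the compiler from tearing, merging or refetching
the loads.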

Fixes: b8da51ebb1aa ("tcp: introduce tcp_under_memory_pressure()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/net/tcp.h
net/ipv4/tcp.c

index 00d10f0e1194997941c89f6f7fe7f524914e54d1..c9630231031491237244c1ec3be1458c5d046de3 100644
@@ -289,7 +289,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;
 
-       return tcp_memory_pressure;
+       return READ_ONCE(tcp_memory_pressure);
 }
 /*
  * The next routines deal with comparing 32 bit unsigned ints
index 8f07655718f3463853c72b2104eb3c70544eea7b..db1eceda2359cf6f51623e50c19549973e2da1c7 100644
@@ -328,7 +328,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (tcp_memory_pressure)
+       if (READ_ONCE(tcp_memory_pressure))
                return;
        val = jiffies;
 
@@ -343,7 +343,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (!tcp_memory_pressure)
+       if (!READ_ONCE(tcp_memory_pressure))
                return;
        val = xchg(&tcp_memory_pressure, 0);
        if (val)