netfilter: nf_queue: fix possible use-after-free
author     Florian Westphal <fw@strlen.de>
           Mon, 28 Feb 2022 05:22:22 +0000 (06:22 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 8 Mar 2022 18:07:47 +0000 (19:07 +0100)
commit c3873070247d9e3c7a6b0cf9bf9b45e8018427b1 upstream.

Eric Dumazet says:
  The sock_hold() side seems suspect, because there is no guarantee
  that sk_refcnt is not already 0.

On failure, we cannot queue the packet and need to indicate an
error.  The packet will be dropped by the caller.
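
As an illustration in plain C11 (not kernel code; unsafe_hold() and
hold_not_zero() are hypothetical stand-ins for sock_hold() and
refcount_inc_not_zero()): once a refcount can legitimately reach zero,
a blind increment "revives" an object whose teardown has already
started, while a compare-and-swap based inc-not-zero fails safely and
lets the caller bail out:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for sock_hold(): increments unconditionally, even from 0.
 * If the count already hit 0 the object may be freed (or freeing),
 * so the caller ends up using dead memory -- the use-after-free. */
static void unsafe_hold(atomic_int *refcnt)
{
        atomic_fetch_add(refcnt, 1);
}

/* Stand-in for refcount_inc_not_zero(): takes a reference only while
 * at least one is still held; reports failure once the count is 0. */
static bool hold_not_zero(atomic_int *refcnt)
{
        int old = atomic_load(refcnt);

        do {
                if (old == 0)
                        return false;   /* too late, object is dying */
        } while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));

        return true;
}

int main(void)
{
        atomic_int refcnt = 0;  /* last reference already dropped */

        unsafe_hold(&refcnt);   /* count is 1 again: dead object "revived" */
        printf("after unsafe_hold: %d\n", atomic_load(&refcnt));

        atomic_store(&refcnt, 0);
        printf("hold_not_zero: %s\n",
               hold_not_zero(&refcnt) ? "took ref (bad)" : "refused (good)");
        return 0;
}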

v2: split skb prefetch hunk into separate change

Fixes: 271b72c7fa82c ("udp: RCU handling for Unicast packets.")
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/net/netfilter/nf_queue.h
net/netfilter/nf_queue.c
net/netfilter/nfnetlink_queue.c

diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 47088083667b2f3e64cca07496543f42e3fdfb01..c204af20c27e422c0224f41939e6234f89c43a52 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -34,7 +34,7 @@ void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
 
 static inline void init_hashrandom(u32 *jhash_initval)
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index ee3b57c25a6ac46cc439f483a956e366965a4a9b..643dbfe7c5815be66cd9e0c3b98e65a742f70d09 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -108,18 +108,20 @@ static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
 }
 
 /* Bump dev refs so they don't vanish while packet is out */
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 {
        struct nf_hook_state *state = &entry->state;
 
+       if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
+               return false;
+
        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
-       if (state->sk)
-               sock_hold(state->sk);
 
        nf_queue_entry_get_br_nf_refs(entry->skb);
+       return true;
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
@@ -210,7 +212,10 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                .size   = sizeof(*entry) + route_key_size,
        };
 
-       nf_queue_entry_get_refs(entry);
+       if (!nf_queue_entry_get_refs(entry)) {
+               kfree(entry);
+               return -ENOTCONN;
+       }
 
        switch (entry->state.pf) {
        case AF_INET:
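
For context (not part of this diff): the new negative return is consumed
by __nf_queue()'s caller, nf_queue(), which is what "dropped by the
caller" above refers to. A paraphrased sketch of that error path, which
may differ in detail across kernel versions:

/* Paraphrased from nf_queue() in net/netfilter/nf_queue.c; not part of
 * this patch.  A negative __nf_queue() return -- now including
 * -ENOTCONN when the socket reference cannot be taken -- frees the skb,
 * i.e. the packet is dropped rather than queued. */
int ret = __nf_queue(skb, state, /* ... */, verdict >> NF_VERDICT_QBITS);

if (ret < 0) {
        if (ret == -ESRCH &&
            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                return 1;       /* queue bypass: continue hook traversal */
        kfree_skb(skb);         /* the drop mentioned in the changelog */
}
return 0;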
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index ca21f8f4a47c1042bbb983d70760cbde0d84c9b7..7d3ab08a5a2d063c8aa043e87da30ece6a7b5e2f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -712,9 +712,15 @@ static struct nf_queue_entry *
 nf_queue_entry_dup(struct nf_queue_entry *e)
 {
        struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
-       if (entry)
-               nf_queue_entry_get_refs(entry);
-       return entry;
+
+       if (!entry)
+               return NULL;
+
+       if (nf_queue_entry_get_refs(entry))
+               return entry;
+
+       kfree(entry);
+       return NULL;
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)