[ovs-dev] [PATCH] ipf: Fix a use-after-free error, and remove the 'do_not_steal' flag.

Aaron Conole aconole at redhat.com
Fri May 21 17:59:05 UTC 2021


As reported by Wang Liang, the way packets are passed to the ipf module
doesn't allow for use later on in reassembly.  Such packets may get
released anyway, such as during cleanup of tx processing.  Because the
ipf module lacks a way of forcing the dp_packet to be retained, it
will later reuse the packet.  Instead, just clone the packet and let the
ipf queue own the copy until the queue is destroyed.

After this change, there are no more in-tree users of the batch
'dnsteal' flag.  Thus, we remove it as well.

Fixes: 4ea96698f667 ("Userspace datapath: Add fragmentation handling.")
Fixes: 0b3ff31d35f5 ("dp-packet: Add 'do_not_steal' packet batch flag.")
Reported-at: https://mail.openvswitch.org/pipermail/ovs-dev/2021-April/382098.html
Reported-by: Wang Liang <wangliangrt at didiglobal.com>
Signed-off-by: Aaron Conole <aconole at redhat.com>
Co-authored-by: Wang Liang <wangliangrt at didiglobal.com>
Signed-off-by: Wang Liang <wangliangrt at didiglobal.com>
---
 lib/dp-packet.h   |  2 --
 lib/dpif-netdev.c |  1 -
 lib/ipf.c         | 19 ++++++-------------
 3 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/lib/dp-packet.h b/lib/dp-packet.h
index 246be14d00..08d93c2779 100644
--- a/lib/dp-packet.h
+++ b/lib/dp-packet.h
@@ -738,7 +738,6 @@ enum { NETDEV_MAX_BURST = 32 }; /* Maximum number packets in a batch. */
 struct dp_packet_batch {
     size_t count;
     bool trunc; /* true if the batch needs truncate. */
-    bool do_not_steal; /* Indicate that the packets should not be stolen. */
     struct dp_packet *packets[NETDEV_MAX_BURST];
 };
 
@@ -747,7 +746,6 @@ dp_packet_batch_init(struct dp_packet_batch *batch)
 {
     batch->count = 0;
     batch->trunc = false;
-    batch->do_not_steal = false;
 }
 
 static inline void
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 650e67ab30..8fa7eb6d4f 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -4169,7 +4169,6 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
     }
 
     dp_packet_batch_init_packet(&pp, execute->packet);
-    pp.do_not_steal = true;
     dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
                               execute->actions, execute->actions_len);
     dp_netdev_pmd_flush_output_packets(pmd, true);
diff --git a/lib/ipf.c b/lib/ipf.c
index c20bcc0b33..9c83f1913a 100644
--- a/lib/ipf.c
+++ b/lib/ipf.c
@@ -93,7 +93,6 @@ struct ipf_frag {
     struct dp_packet *pkt;
     uint16_t start_data_byte;
     uint16_t end_data_byte;
-    bool dnsteal; /* 'do not steal': if true, ipf should not free packet. */
 };
 
 /* The key for a collection of fragments potentially making up an unfragmented
@@ -795,8 +794,7 @@ ipf_is_frag_duped(const struct ipf_frag *frag_list, int last_inuse_idx,
 static bool
 ipf_process_frag(struct ipf *ipf, struct ipf_list *ipf_list,
                  struct dp_packet *pkt, uint16_t start_data_byte,
-                 uint16_t end_data_byte, bool ff, bool lf, bool v6,
-                 bool dnsteal)
+                 uint16_t end_data_byte, bool ff, bool lf, bool v6)
     OVS_REQUIRES(ipf->ipf_lock)
 {
     bool duped_frag = ipf_is_frag_duped(ipf_list->frag_list,
@@ -811,10 +809,9 @@ ipf_process_frag(struct ipf *ipf, struct ipf_list *ipf_list,
              * recommend not setting the mempool number of buffers too low
              * and also clamp the number of fragments. */
             struct ipf_frag *frag = &ipf_list->frag_list[last_inuse_idx + 1];
-            frag->pkt = pkt;
+            frag->pkt = dp_packet_clone(pkt);
             frag->start_data_byte = start_data_byte;
             frag->end_data_byte = end_data_byte;
-            frag->dnsteal = dnsteal;
             ipf_list->last_inuse_idx++;
             atomic_count_inc(&ipf->nfrag);
             ipf_count(ipf, v6, IPF_NFRAGS_ACCEPTED);
@@ -851,8 +848,7 @@ ipf_list_init(struct ipf_list *ipf_list, struct ipf_list_key *key,
  * to a list of fragemnts. */
 static bool
 ipf_handle_frag(struct ipf *ipf, struct dp_packet *pkt, ovs_be16 dl_type,
-                uint16_t zone, long long now, uint32_t hash_basis,
-                bool dnsteal)
+                uint16_t zone, long long now, uint32_t hash_basis)
     OVS_REQUIRES(ipf->ipf_lock)
 {
     struct ipf_list_key key;
@@ -921,7 +917,7 @@ ipf_handle_frag(struct ipf *ipf, struct dp_packet *pkt, ovs_be16 dl_type,
     }
 
     return ipf_process_frag(ipf, ipf_list, pkt, start_data_byte,
-                            end_data_byte, ff, lf, v6, dnsteal);
+                            end_data_byte, ff, lf, v6);
 }
 
 /* Filters out fragments from a batch of fragments and adjust the batch. */
@@ -942,8 +938,7 @@ ipf_extract_frags_from_batch(struct ipf *ipf, struct dp_packet_batch *pb,
                           ipf_is_valid_v6_frag(ipf, pkt)))) {
 
             ovs_mutex_lock(&ipf->ipf_lock);
-            if (!ipf_handle_frag(ipf, pkt, dl_type, zone, now, hash_basis,
-                                 pb->do_not_steal)) {
+            if (!ipf_handle_frag(ipf, pkt, dl_type, zone, now, hash_basis)) {
                 dp_packet_batch_refill(pb, pkt, pb_idx);
             }
             ovs_mutex_unlock(&ipf->ipf_lock);
@@ -1338,9 +1333,7 @@ ipf_destroy(struct ipf *ipf)
         while (ipf_list->last_sent_idx < ipf_list->last_inuse_idx) {
             struct dp_packet *pkt
                 = ipf_list->frag_list[ipf_list->last_sent_idx + 1].pkt;
-            if (!ipf_list->frag_list[ipf_list->last_sent_idx + 1].dnsteal) {
-                dp_packet_delete(pkt);
-            }
+            dp_packet_delete(pkt);
             atomic_count_dec(&ipf->nfrag);
             ipf_list->last_sent_idx++;
         }
-- 
2.31.1



More information about the dev mailing list