[ovs-dev] [PATCH RFC ovs 06/14] dpif-netdev: Add HW miss packet state recover logic

Eli Britstein elibr at nvidia.com
Wed Dec 9 13:48:24 UTC 2020


Recover the packet if it was partially processed by the HW. Fall back to
looking up the flow by its mark association.

Signed-off-by: Eli Britstein <elibr at nvidia.com>
---
 lib/dpif-netdev.c | 46 ++++++++++++++++++++++++++++++----------------
 1 file changed, 30 insertions(+), 16 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 3645479ad..06335612d 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -6994,6 +6994,10 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd,
     pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit);
 }
 
+static struct tx_port *
+pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
+                           odp_port_t port_no);
+
 /* Try to process all ('cnt') the 'packets' using only the datapath flow cache
  * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
  * miniflow is copied into 'keys' and the packet pointer is moved at the
@@ -7057,23 +7061,33 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
             pkt_metadata_init(&packet->md, port_no);
         }
 
-        if ((*recirc_depth_get() == 0) &&
-            dp_packet_has_flow_mark(packet, &mark)) {
-            flow = mark_to_flow_find(pmd, mark);
-            if (OVS_LIKELY(flow)) {
-                tcp_flags = parse_tcp_flags(packet);
-                if (OVS_LIKELY(batch_enable)) {
-                    dp_netdev_queue_batches(packet, flow, tcp_flags, batches,
-                                            n_batches);
-                } else {
-                    /* Flow batching should be performed only after fast-path
-                     * processing is also completed for packets with emc miss
-                     * or else it will result in reordering of packets with
-                     * same datapath flows. */
-                    packet_enqueue_to_flow_map(packet, flow, tcp_flags,
-                                               flow_map, map_cnt++);
+        if (*recirc_depth_get() == 0) {
+            /* Restore the packet if HW processing was terminated before
+             * completion.
+             */
+            struct tx_port *p;
+
+            tcp_flags = parse_tcp_flags(packet);
+            p = pmd_send_port_cache_lookup(pmd, port_no);
+            if (!p || netdev_hw_miss_packet_recover(p->port->netdev, packet)) {
+                if (dp_packet_has_flow_mark(packet, &mark)) {
+                    flow = mark_to_flow_find(pmd, mark);
+                    if (OVS_LIKELY(flow)) {
+                        if (OVS_LIKELY(batch_enable)) {
+                            dp_netdev_queue_batches(packet, flow, tcp_flags,
+                                                    batches, n_batches);
+                        } else {
+                            /* Flow batching should be performed only after
+                             * fast-path processing is also completed for
+                             * packets with emc miss or else it will result in
+                             * reordering of packets with same datapath flows.
+                             */
+                            packet_enqueue_to_flow_map(packet, flow, tcp_flags,
+                                                       flow_map, map_cnt++);
+                        }
+                        continue;
+                    }
                 }
-                continue;
             }
         }
 
-- 
2.28.0.546.g385c171



More information about the dev mailing list