[ovs-dev] [PATCH v1] dpif: Add marked packets stats

Ophir Munk ophirmu at mellanox.com
Tue Mar 12 16:18:29 UTC 2019


> >
> > Not sure if I understand your point correctly, but isn't it expected
> > that if a flow is offloaded then all the packets through the flow had a
> > flow mark?

Not always. For fully HW offloaded traffic the packets will not be marked, since they are forwarded entirely in hardware and never reach the software datapath.
It is planned to have another HW offload counter (which is different from the mark counter) for that traffic.
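
To make the distinction concrete, here is a rough sketch (not part of the patch; it reuses the existing dpif-netdev helpers dp_packet_has_flow_mark() and mark_to_flow_find(), plus the mark_count field added below) of what is and is not countable in software:

/* Rough sketch only: partially offloaded packets reach the PMD carrying a
 * flow mark, so they can be resolved back to their megaflow and counted.
 * Fully offloaded packets are forwarded by the NIC and never hit this code
 * path, hence the need for a separate HW offload counter. */
static void
sketch_count_marked(struct dp_netdev_pmd_thread *pmd, struct dp_packet *pkt)
{
    uint32_t mark;

    if (dp_packet_has_flow_mark(pkt, &mark)) {
        struct dp_netdev_flow *flow = mark_to_flow_find(pmd, mark);

        if (flow) {
            /* Classification was done in HW; count the marked packet. */
            non_atomic_ullong_add(&flow->stats.mark_count, 1);
        }
    }
}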

> > In case of a bug it's most likely that it happened after the flow mark
> > association and your flow stats will be wrong anyway.
> > The only trusted source of the number of "offloaded" packets could be
> > the number of real packets that had flow mark set on receive. And
> > these stats should be displayed by PMD stats.
> >

Not sure about that. There is no one-to-one relationship between a PMD and a flow:
a PMD handles many flows, so PMD-level counters cannot tell you which flows actually received marked packets.
In order to debug this properly (how many offloaded packets actually got a mark)
I think you need statistics at flow resolution.

> > If everything works as expected, all the packets for an offloaded flow
> > will be marked (except the first few of them).
> > If something goes wrong, we can't trust these flow stats. We can
> > rely only
> Why can't you trust the flow stats? This is the only point in the code where
> you know that the flow was really handled by hardware.
> 
> > on PMD stats.
> > So, I don't see the benefit of having these special flow stats.
> >
> I'm looking at it from the OVS user's point of view. I have thousands of flows
> running.
> If the statistics are only in the PMD, I can see how many packets were marked
> and how many were not. Let's say the ratio is not that good; how do I look
> deeper?
> Is it because there is a bug in matching for some flows? Is it because of the
> latency in adding flows?
> Is it because some flow pattern is not supported at all?
> Agree this is only needed for debugging, but I don't see another way the user
> can get this information.
> 

Do we agree that knowing how many marked packets were received per flow is valuable?
It can be used to confirm correct functionality and to improve performance for offloaded traffic.
If we agree, then we should discuss how to implement it.
I don't think that PMD-level statistics can be used.
Any other ideas?
Adding a debug flag ("-d") to the dpif dump-flows command?
Creating a new command for dpif-netdev?
Please advise.
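
For illustration only (the command invocation and field order below are my assumption, not something this patch defines), a verbose flow dump could then show the new counter next to the existing per-flow stats:

    ovs-appctl dpif/dump-flows -m br0
    ..., in_port(2), ..., marked:1024, packets:1024, bytes:65536, used:0.512s, actions:1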

> > >
> > > Thanks
> > >>
> > >>>  lib/dpif-netdev.c      | 16 ++++++++++++++++
> > >>>  lib/dpif.c             |  9 +++++++++
> > >>>  lib/dpif.h             |  3 +++
> > >>>  ofproto/ofproto-dpif.c |  4 ++++
> > >>>  4 files changed, 32 insertions(+)
> > >>>
> > >>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> > >>> index 4d6d0c3..ef2f8f1 100644
> > >>> --- a/lib/dpif-netdev.c
> > >>> +++ b/lib/dpif-netdev.c
> > >>> @@ -486,6 +486,8 @@ struct dp_netdev_flow_stats {
> > >>>      atomic_ullong packet_count;    /* Number of packets matched. */
> > >>>      atomic_ullong byte_count;      /* Number of bytes matched. */
> > >>>      atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
> > >>> +    /* Offload stats. */
> > >>> +    atomic_ullong mark_count;      /* Number of marked packets matched. */
> > >>>  };
> > >>>
> > >>>  /* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
> > >>> @@ -3008,6 +3010,9 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
> > >>>      stats->used = used;
> > >>>      atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
> > >>>      stats->tcp_flags = flags;
> > >>> +    /* Offload stats. */
> > >>> +    atomic_read_relaxed(&netdev_flow->stats.mark_count, &n);
> > >>> +    stats->n_marked = n;
> > >>>  }
> > >>>
> > >>>  /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
> > >>> @@ -6124,6 +6129,15 @@ dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
> > >>>      atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
> > >>>  }
> > >>>
> > >>> +static void
> > >>> +dp_netdev_flow_marked(struct dp_netdev_flow *netdev_flow, uint32_t mark,
> > >>> +                    int count)
> > >>> +{
> > >>> +    if (mark != INVALID_FLOW_MARK) {
> > >>> +        non_atomic_ullong_add(&netdev_flow->stats.mark_count, count);
> > >>> +    }
> > >>> +}
> > >>> +
> > >>>  static int
> > >>>  dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
> > >>>                   struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
> > >>> @@ -6244,6 +6258,8 @@ packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
> > >>>      dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
> > >>>                          batch->tcp_flags, pmd->ctx.now / 1000);
> > >>>
> > >>> +    dp_netdev_flow_marked(flow, batch->flow->mark, batch->array.count);
> > >>> +
> > >>>      actions = dp_netdev_flow_get_actions(flow);
> > >>>
> > >>>      dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
> > >>> diff --git a/lib/dpif.c b/lib/dpif.c
> > >>> index 457c9bf..bb077a5 100644
> > >>> --- a/lib/dpif.c
> > >>> +++ b/lib/dpif.c
> > >>> @@ -880,9 +880,18 @@ dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
> > >>>      stats->tcp_flags = ntohs(flow->tcp_flags);
> > >>>      stats->n_bytes = dp_packet_size(packet);
> > >>>      stats->n_packets = 1;
> > >>> +    stats->n_marked = 0;
> > >>>      stats->used = used;
> > >>>  }
> > >>>
> > >>> +void
> > >>> +dpif_offload_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
> > >>> +{
> > >>> +    if (stats->n_marked) {
> > >>> +        ds_put_format(s, "marked:%"PRIu64", ", stats->n_marked);
> > >>> +    }
> > >>> +}
> > >>> +
> > >>>  /* Appends a human-readable representation of 'stats' to 's'. */
> > >>>  void
> > >>>  dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
> > >>> diff --git a/lib/dpif.h b/lib/dpif.h
> > >>> index 475d5a6..7c22afd 100644
> > >>> --- a/lib/dpif.h
> > >>> +++ b/lib/dpif.h
> > >>> @@ -492,6 +492,8 @@ struct dpif_flow_stats {
> > >>>      uint64_t n_bytes;
> > >>>      long long int used;
> > >>>      uint16_t tcp_flags;
> > >>> +    /* Offload stats. */
> > >>> +    uint64_t n_marked;
> > >>>  };
> > >>>
> > >>>  struct dpif_flow_attrs {
> > >>> @@ -507,6 +509,7 @@ struct dpif_flow_dump_types {
> > >>>  void dpif_flow_stats_extract(const struct flow *, const struct dp_packet *packet,
> > >>>                               long long int used, struct dpif_flow_stats *);
> > >>>  void dpif_flow_stats_format(const struct dpif_flow_stats *, struct ds *);
> > >>> +void dpif_offload_stats_format(const struct dpif_flow_stats *, struct ds *);
> > >>>
> > >>>  enum dpif_flow_put_flags {
> > >>>      DPIF_FP_CREATE = 1 << 0,    /* Allow creating a new flow. */
> > >>> diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
> > >>> index 21dd54b..af37ecb 100644
> > >>> --- a/ofproto/ofproto-dpif.c
> > >>> +++ b/ofproto/ofproto-dpif.c
> > >>> @@ -4416,6 +4416,7 @@ rule_construct(struct rule *rule_)
> > >>>      rule->stats.n_packets = 0;
> > >>>      rule->stats.n_bytes = 0;
> > >>>      rule->stats.used = rule->up.modified;
> > >>> +    rule->stats.n_marked = 0;
> > >>>      rule->recirc_id = 0;
> > >>>      rule->new_rule = NULL;
> > >>>      rule->forward_counts = false;
> > >>> @@ -5709,6 +5710,9 @@ ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
> > >>>          odp_flow_format(f.key, f.key_len, f.mask, f.mask_len,
> > >>>                          portno_names, &ds, verbosity);
> > >>>          ds_put_cstr(&ds, ", ");
> > >>> +        if (verbosity) {
> > >>> +            dpif_offload_stats_format(&f.stats, &ds);
> > >>> +        }
> > >>>          dpif_flow_stats_format(&f.stats, &ds);
> > >>>          ds_put_cstr(&ds, ", actions:");
> > >>>          format_odp_actions(&ds, f.actions, f.actions_len, portno_names);
> > >>>

